# (dataset-extraction residue removed: "text stringlengths ..." table header)
# camel-banana problem
def max_bananas(total, distance, load_capacity):
    """Return how many bananas survive transporting them mile by mile.

    The camel moves the stockpile one mile at a time. Every load carried
    forward over a mile costs 2 bananas (one for the forward leg, one for
    the return leg) except the last load of the mile, which needs no
    return trip. A single leftover banana is never fetched, since the
    round trip would cost more than it delivers.

    Parameters:
        total: bananas at the start.
        distance: miles to cover.
        load_capacity: maximum bananas the camel can carry at once.

    Returns:
        Bananas delivered at the destination (0 if none survive).
    """
    lose = 0
    start = total
    for _ in range(distance):
        while start > 0:
            start = start - load_capacity
            # Don't go back for a single remaining banana: fetching it
            # would cost one extra banana to cover the two extra miles.
            if start == 1:
                lose = lose - 1
            # Each load costs two bananas per mile (forward + return).
            lose = lose + 2
        # The last trip of the mile has no return leg.
        lose = lose - 1
        start = total - lose
        # Nothing left to carry forward.
        if start == 0:
            break
    return start


def main():
    """Interactive driver preserving the original prompts and output."""
    total = int(input('Enter no. of bananas at starting :'))
    distance = int(input('Enter distance you want to cover :'))
    load_capacity = int(input('Enter max load capacity of your camel :'))
    print("Maximum no of bananas delivered:", max_bananas(total, distance, load_capacity))


if __name__ == '__main__':
    main()
# AI_Camel Python Program
# ---
from urllib import request
class spider():
    """Minimal fetcher for the Panda.tv LoL category page.

    Bug fixes: the original read the response body and silently discarded
    it, and never closed the HTTP response.
    """
    url = 'https://www.panda.tv/cate/lol'

    def __fetch_content(self):
        """Download the page and return its raw bytes."""
        r = request.urlopen(spider.url)
        try:
            htmls = r.read()
        finally:
            # Always release the connection, even if read() fails.
            r.close()
        return htmls

    def go(self):
        """Fetch the page and return its content (previously returned None)."""
        return self.__fetch_content()
# NOTE(review): the original rebound the name `spider` to the instance,
# shadowing the class and making it unreachable afterwards; bind the
# instance to a distinct name instead.
crawler = spider()
crawler.go()
# |
__author__ = "Narwhale"
import random
def insert_sort(li):
    """Sort the list *li* in place using insertion sort; returns None."""
    for idx in range(1, len(li)):
        current = li[idx]
        pos = idx
        # Shift every larger element one slot to the right, then drop
        # the current value into the gap.
        while pos > 0 and li[pos - 1] > current:
            li[pos] = li[pos - 1]
            pos -= 1
        li[pos] = current
# Demo: shuffle 0..999 and restore order in place with insert_sort.
data = list(range(1000))
random.shuffle(data)
insert_sort(data)
print(data)
#!/usr/local/bin/python3
import sys
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from math import ceil
import os
# Geometry variables (lengths in mm; scaled to metres via `magnitude` below)
# Width
W = 148.5
# Height
H = 112.5
# Depth
D = 112.5
# Triangle leg length
a = 12.7
l = (a ** 2 - 0.25 * a ** 2) ** 0.5  # Triangle height, do not change
# Offset of triangle 0
dx = 33.0
dy = -5.0
# Offset of triangle 1 with respect to 0
dx01 = 0  # 22.0 - l
dy01 = a / 2
# Offset of triangle 2 with respect to 1
dx12 = 0  # 22.0 - l
dy12 = a / 2
# Number of mesh cells
N = 3000000  # 270000
# Something's messed up somewhere... This is just a workaround for now.
# Use floor division so N stays an int: the original `N / 10` silently
# turned N into a float under Python 3 (same numeric value, wrong type).
N = N // 10
# Number of layers in the depth direction
L = ceil(N ** (1 / 3))
# Spatial dimensions
magnitude = 0.001  # [mm] -> convertToMeters scale factor
# Other variables
# Inlet velocity
V_in = 0.7514
# Turbulence intensity
TI = 0.0158
def main():
    """Interactively build the OpenFOAM case files (blockMeshDict, initial
    fields, controlDict) for the triangle-obstacle channel geometry.

    Relies on module-level geometry constants (W, H, D, N, L, a, dx, ...)
    and on template strings (header, footer, initial_U, initial_p,
    initial_k, initial_nuSgs, initial_nuTilda, controlDict1, controlDict2)
    defined elsewhere in the file.
    """
    os.system('clear')
    print('Hi.\n\nI\'m creating the geometry...')
    r = 2.0 * W / H # Aspect ratio
    # Cell counts per direction from the total cell budget N and L z-layers.
    cells_y = ceil((N / L / r) ** (1 / 2))
    cells_x = ceil(r * cells_y)
    triangles_x, triangles_y = create_triangles(H, W, a, dx, dy, dx01, dy01, dx12, dy12)
    boxes_x, boxes_y = create_boxes_new(H, W, triangles_x, triangles_y, a)
    vertices = create_vertices_new(H, W, triangles_x, triangles_y, a)
    probes = create_probes(triangles_x, triangles_y, dx01, dy01, dx12, dy12, D)
    # Guarantees sys.argv[1] exists so the flag test below cannot IndexError.
    sys.argv.append('dummy')
    if (sys.argv[1] != '-np'):
        # Interactive preview unless launched with -np ("no preview").
        print('\nI\'m drawing a preview...')
        plot_preview(H, W, triangles_x, triangles_y, boxes_x, boxes_y, vertices, probes)
        accept = input('\nDo you accept the geometry? (y/n)\n\n')
        if (accept != 'y'):
            print('\nYou didn\'t accept the geometry. Aborting. Cya.')
            plt.close()
            sys.exit(0)
        plt.close()
    print('\nI\'m creating blockMeshDict...')
    try:
        file = open('blockMeshDict', 'w')
    except:
        # NOTE(review): bare except; any failure whatsoever aborts here.
        print('\nSomething went wrong. I cannot create blockMeshDict.')
        sys.exit(0)
    file.write(header)
    write_magnitude(file, magnitude)
    write_vertices(file, vertices, D)
    write_boxes(file, boxes_x, boxes_y, vertices, D, cells_x, cells_y)
    write_edges(file)
    write_boundary(file, triangles_x, triangles_y, boxes_x, boxes_y, vertices)
    write_mergePatchPairs(file)
    file.write(footer)
    file.close()
    print('\nI\'m creating the initial conditions for U...')
    try:
        file = open('U', 'w')
    except:
        print('\nSomething went wrong. I cannot create U.')
        sys.exit(0)
    file.write(initial_U)
    file.close()
    print('\nI\'m creating the initial conditions for p...')
    try:
        file = open('p', 'w')
    except:
        print('\nSomething went wrong. I cannot create p.')
        sys.exit(0)
    file.write(initial_p)
    file.close()
    print('\nI\'m creating the initial conditions for k...')
    try:
        file = open('k', 'w')
    except:
        print('\nSomething went wrong. I cannot create k.')
        sys.exit(0)
    file.write(initial_k)
    file.close()
    print('\nI\'m creating the initial conditions for nuSgs...')
    try:
        file = open('nuSgs', 'w')
    except:
        print('\nSomething went wrong. I cannot create nuSgs.')
        sys.exit(0)
    file.write(initial_nuSgs)
    file.close()
    print('\nI\'m creating the initial conditions for nuTilda...')
    try:
        file = open('nuTilda', 'w')
    except:
        print('\nSomething went wrong. I cannot create nuTilda.')
        sys.exit(0)
    file.write(initial_nuTilda)
    file.close()
    print('\nI\'m creating the file controlDict...')
    try:
        file = open('controlDict', 'w')
    except:
        print('\nSomething went wrong. I cannot create controlDict.')
        sys.exit(0)
    file.write(controlDict1)
    # Probe locations are spliced between the two controlDict templates.
    write_probes(file, probes)
    file.write(controlDict2)
    file.close()
    print('\nI\'m setting up the simulation directory...')
    os.system('./organize.sh > /dev/null 2>&1')
    print('\nI\'m done. Cya.')
# Reference note kept as a module-level string literal (no runtime effect):
# example of the multi-zone simpleGrading syntax that refine_mesh() would
# emit if the targeted OpenFOAM version supported it.
'''
blocks
(
hex (0 1 11 10 14 15 25 24) (20 20 1)
simpleGrading
(
1
(
(0.2 0.3 4) // 20% y-dir, 30% cells, expansion = 4
(0.6 0.4 1) // 60% y-dir, 40% cells, expansion = 1
(0.2 0.3 0.25) // 20% y-dir, 30% cells, expansion = 0.25 (1/4)
)
1
)
);
'''
def create_probes(tri_x, tri_y, dx01, dy01, dx12, dy12, D):
    """Return a (15, 3) array of probe coordinates at mid-depth z = D / 2.

    NOTE(review): also reads the module globals `l` (triangle height) and
    `W` (channel width) for the wake/outlet probes.
    """
    z = D / 2
    locations = [
        ((tri_x[0, 2] + tri_x[1, 0]) / 2, (tri_y[0, 2] + tri_y[1, 0]) / 2),
        ((tri_x[1, 2] + tri_x[2, 0]) / 2, (tri_y[1, 2] + tri_y[2, 0]) / 2),
        (tri_x[2, 2] + l / 2, tri_y[2, 2] + dy12 / 2),
        (tri_x[2, 1] + l / 2, tri_y[2, 1] - dy12 / 2),
        ((tri_x[1, 1] + tri_x[3, 2]) / 2 + dx12 / 2, (tri_y[1, 1] + tri_y[3, 2]) / 2),
        ((tri_x[2, 1] + tri_x[4, 2]) / 2 + l / 2, (tri_y[2, 0] + tri_y[4, 0]) / 2),
        ((tri_x[0, 1] + tri_x[3, 0]) / 2, (tri_y[0, 1] + tri_y[3, 0]) / 2),
        ((tri_x[3, 1] + tri_x[4, 0]) / 2, (tri_y[3, 1] + tri_y[4, 0]) / 2),
        (tri_x[4, 2] + l / 2, tri_y[4, 2] + dy12 / 2),
        (tri_x[4, 1] + l / 2, tri_y[4, 1] - dy12 / 2),
        (W - 20, tri_y[4, 2] + dy12 / 2),
        (W - 20, tri_y[4, 1] - dy12 / 2),
        (W - 20, tri_y[2, 2] + dy12 / 2),
        (W - 20, tri_y[2, 1] - dy12 / 2),
        (W - 20, (tri_y[2, 0] + tri_y[4, 0]) / 2),
    ]
    probes = np.zeros((15, 3))
    for row, (px, py) in enumerate(locations):
        probes[row] = (px, py, z)
    return probes
def write_probes(file, probes):
    """Write one "(x y z)" probe line per row, scaled by the module global
    `magnitude` (mm -> m).

    Bug fix: the original scaled with `probes *= magnitude`, mutating the
    caller's array as a side effect; scale a copy instead.
    """
    scaled = probes * magnitude
    for i in range(len(scaled)):
        s = '\t\t\t(' + str(scaled[i, 0]) + ' ' + str(scaled[i, 1]) + ' ' + str(scaled[i, 2]) + ')\n'
        file.write(s)
def refine_mesh(file, direction, l0, c0, e0, l1, c1, e1):
    """Write a simpleGrading entry with expansion ratio e0 along *direction*
    ('x', 'y' or 'z') and 1 along the other two axes.

    The two-zone refinement parameters (l0, c0, l1, c1, e1) are kept for
    interface compatibility but are unused: OpenFOAM 2.3.0 does not support
    that type of per-zone grading, so only e0 is emitted. (The original
    built the multi-zone string and then immediately discarded it; that
    dead code is removed here.)
    """
    file.write('\tsimpleGrading\n\t(\n')
    for axis in ['x', 'y', 'z']:
        if axis == direction:
            file.write('\t\t' + str(e0) + '\n')
        else:
            file.write('\t\t1\n')
    file.write('\t)\n')
def find_index(vertex, vertices):
    """Return the row index of *vertex* in *vertices*, or None if absent."""
    for row, candidate in enumerate(vertices):
        if np.array_equal(vertex, candidate):
            return row
def write_magnitude(file, magnitude):
    """Write the blockMeshDict convertToMeters scale factor."""
    file.write('\nconvertToMeters %s;\n' % str(magnitude))
def write_vertices(file, vertices, D):
    """Write the vertices section: the front plane (z = 0) first, then the
    back plane (z = D) with indices offset by len(vertices)."""
    file.write('\nvertices\n(\n')
    count = len(vertices)
    for z in (0.0, D):
        for idx in range(count):
            entry = '\t(' + str(vertices[idx, 0]) + '\t' + str(vertices[idx, 1]) + '\t' + str(z) + ')'
            if z == 0.0:
                entry += '\t\t// Vertex ' + str(idx) + '\n'
            elif z == D:
                entry += '\t\t// Vertex ' + str(idx + count) + '\n'
            file.write(entry)
    file.write(');\n')
def write_boxes(file, boxes_x, boxes_y, vertices, D, cells_x, cells_y):
    """Write the blocks section: one hex entry per nonempty box.

    Reads the module globals W (channel width) and L (z-layer count).
    Vertex ids come from find_index(); back-plane ids are offset by
    len(vertices), matching write_vertices().
    """
    file.write('\nblocks\n(\n')
    for i in range(len(boxes_x)):
        # Protection against writing blocks with a zero surface area
        if (not np.array_equal(boxes_x[i, 0], boxes_x[i, 1])):
            s_front = '('
            s_back = ''
            for j in range(4):
                vertex = np.hstack((boxes_x[i, j], boxes_y[i, j]))
                index = find_index(vertex, vertices)
                s_front += str(index) + ' '
                s_back += str(index + len(vertices)) + ' '
            s = '\thex ' + s_front + s_back + ')'
            # Cell counts: x proportional to the block width, y a quarter
            # of the column budget.
            width = boxes_x[i, 1] - boxes_x[i, 0]
            c_x = ceil(width / (W / cells_x))
            c_y = ceil(cells_y / 4)
            if (i in range(7)):
                # Blocks 0-6: graded in y with expansion ratio 6.
                file.write(s + ' (' + str(c_x) + ' ' + str(c_y) + ' ' + str(L) + ')\t\t// Block ' + str(i) + '\n')
                refine_mesh(file, 'y', 0.1, 0.75, 6, 0.9, 0.25, 6)
            elif (i in range(7, 14)):
                # Blocks 7-13: graded in y with expansion ratio 0.167.
                file.write(s + ' (' + str(c_x) + ' ' + str(c_y) + ' ' + str(L) + ')\t\t// Block ' + str(i) + '\n')
                refine_mesh(file, 'y', 0.9, 0.25, 0.167, 0.1, 0.75, 0.167)
            elif (i in [46, 54, 33, 40, 48, 37, 44, 52, 24, 29, 35, 42, 50]):
                # NOTE(review): these specific block ids get double the y
                # cell count — presumably taller blocks; verify in preview.
                file.write(s + ' (' + str(c_x) + ' ' + str(2 * c_y) + ' ' + str(L) + ') simpleGrading (1 1 1)\t\t// Block ' + str(i) + '\n')
            else:
                file.write(s + ' (' + str(c_x) + ' ' + str(c_y) + ' ' + str(L) + ') simpleGrading (1 1 1)\t\t// Block ' + str(i) + '\n')
    file.write(');\n')
def write_edges(file):
    """Write an empty edges section (no curved edges in this mesh)."""
    for part in ('\nedges\n(\n', ');\n'):
        file.write(part)
def write_boundary(file, triangles_x, triangles_y, boxes_x, boxes_y, vertices):
    """Write the boundary section by delegating to the per-patch writers.

    The call order fixes the patch order in the generated blockMeshDict.
    """
    file.write('\nboundary\n(\n')
    write_boundary_frontAndBack(file, boxes_x, boxes_y, vertices)
    write_boundary_triangleWalls(file, triangles_x, triangles_y, vertices)
    write_boundary_outsideWalls(file, boxes_x, boxes_y, vertices)
    write_boundary_inlet(file, boxes_x, boxes_y, vertices)
    write_boundary_outlet(file, boxes_x, boxes_y, vertices)
    file.write(');\n')
def write_boundary_outsideWalls(file, boxes_x, boxes_y, vertices):
    """Write the outsideWalls patch: the wall-side face of every block
    touching the bottom (y = 0) or the top (y = H, module global) wall.
    """
    file.write('\toutsideWalls\n\t{\n\t\ttype wall;\n\t\tfaces\n\t\t(\n')
    for i in range(len(boxes_x)):
        # Check if the block is adjacent to either the upper or lower wall
        if ((0 in boxes_y[i, :] or H in boxes_y[i, :]) and not np.array_equal(boxes_x[i, 0], boxes_x[i, 1])):
            # j picks the starting corner of the wall-side edge:
            # 2 for the top wall, 0 for the bottom wall. A block touching
            # both walls would only emit its top face (H checked first).
            if (H in boxes_y[i, :]):
                j = 2;
            elif (0 in boxes_y[i, :]):
                j = 0;
            x = boxes_x[i, :]
            y = boxes_y[i, :]
            # Face = (corner j, corner j+1, back of j+1, back of j).
            s = '('
            vertex = np.hstack((x[j], y[j]))
            index = find_index(vertex, vertices)
            s += str(index) + ' '
            vertex = np.hstack((x[j + 1], y[j + 1]))
            index = find_index(vertex, vertices)
            s += str(index) + ' '
            vertex = np.hstack((x[j + 1], y[j + 1]))
            index = find_index(vertex, vertices)
            s += str(index + len(vertices)) + ' '
            vertex = np.hstack((x[j], y[j]))
            index = find_index(vertex, vertices)
            s += str(index + len(vertices)) + ')'
            file.write('\t\t\t' + s + '\t// Block ' + str(i) + '\n')
    file.write('\t\t);\n\t}\n')
def write_boundary_triangleWalls(file, triangles_x, triangles_y, vertices):
    """Write the triangleWalls patch: the three side faces of each of the
    triangular obstacles (front/back faces are handled elsewhere).
    """
    file.write('\ttriangleWalls\n\t{\n\t\ttype wall;\n\t\tfaces\n\t\t(\n')
    for i in range(len(triangles_x)):
        # Close the polygon by appending the first corner again.
        x = np.hstack((triangles_x[i, :], triangles_x[i, 0]))
        y = np.hstack((triangles_y[i, :], triangles_y[i, 0]))
        for j in range(3):
            # Face = (corner j, corner j+1, back of j+1, back of j).
            s = '('
            vertex = np.hstack((x[j], y[j]))
            index = find_index(vertex, vertices)
            s += str(index) + ' '
            vertex = np.hstack((x[j + 1], y[j + 1]))
            index = find_index(vertex, vertices)
            s += str(index) + ' '
            vertex = np.hstack((x[j + 1], y[j + 1]))
            index = find_index(vertex, vertices)
            s += str(index + len(vertices)) + ' '
            vertex = np.hstack((x[j], y[j]))
            index = find_index(vertex, vertices)
            s += str(index + len(vertices)) + ')'
            # Only the first face of each triangle carries the label comment.
            if (j == 0):
                file.write('\t\t\t' + s + '\t// Triangle ' + str(i) + '\n')
            else:
                file.write('\t\t\t' + s + '\n')
    file.write('\t\t);\n\t}\n')
def write_boundary_frontAndBack(file, boxes_x, boxes_y, vertices):
    """Write the frontAndBack patch: the z = 0 and z = D faces of every
    block with a nonzero surface area."""
    file.write('\tfrontAndBack\n\t{\n\t\ttype wall;\n\t\tfaces\n\t\t(\n')
    n = len(vertices)
    for i in range(len(boxes_x)):
        # Degenerate (zero-width) blocks are never meshed, so skip them.
        if np.array_equal(boxes_x[i, 0], boxes_x[i, 1]):
            continue
        ids = []
        for corner in range(4):
            point = np.hstack((boxes_x[i, corner], boxes_y[i, corner]))
            ids.append(find_index(point, vertices))
        front = '(' + ''.join(str(k) + ' ' for k in ids)
        back = '(' + ''.join(str(k + n) + ' ' for k in ids)
        file.write('\t\t\t' + front + ')\t// Block ' + str(i) + '\n\t\t\t' + back + ')\n')
    file.write('\t\t);\n\t}\n')
def write_boundary_inlet(file, boxes_x, boxes_y, vertices):
    """Write the inlet patch: the left (x == 0) face of every block that
    starts at the inlet and has a nonzero width."""
    file.write('\tinlet\n\t{\n\t\ttype patch;\n\t\tfaces\n\t\t(\n')
    n = len(vertices)
    for i in range(len(boxes_x)):
        xs = boxes_x[i, :]
        ys = boxes_y[i, :]
        if xs[0] != 0 or np.array_equal(xs[0], xs[1]):
            continue
        # Face = (lower-left, upper-left, back upper-left, back lower-left).
        lower = find_index(np.hstack((xs[0], ys[0])), vertices)
        upper = find_index(np.hstack((xs[3], ys[3])), vertices)
        quad = '(' + str(lower) + ' ' + str(upper) + ' ' + str(upper + n) + ' ' + str(lower + n) + ')'
        file.write('\t\t\t' + quad + '\t// Block ' + str(i) + '\n')
    file.write('\t\t);\n\t}\n')
def write_boundary_outlet(file, boxes_x, boxes_y, vertices):
    """Write the outlet patch: the right face of every block ending at the
    channel width W (module global). Unlike the inlet writer, no zero-area
    guard is applied (outlet blocks always have nonzero width here)."""
    file.write('\toutlet\n\t{\n\t\ttype patch;\n\t\tfaces\n\t\t(\n')
    n = len(vertices)
    for i in range(len(boxes_x)):
        xs = boxes_x[i, :]
        ys = boxes_y[i, :]
        if xs[1] != W:
            continue
        # Face = (lower-right, upper-right, back upper-right, back lower-right).
        lower = find_index(np.hstack((xs[1], ys[1])), vertices)
        upper = find_index(np.hstack((xs[2], ys[2])), vertices)
        quad = '(' + str(lower) + ' ' + str(upper) + ' ' + str(upper + n) + ' ' + str(lower + n) + ')'
        file.write('\t\t\t' + quad + '\t// Block ' + str(i) + '\n')
    file.write('\t\t);\n\t}\n')
def write_mergePatchPairs(file):
    """Write an empty mergePatchPairs section."""
    for part in ('\nmergePatchPairs\n(\n', ');\n'):
        file.write(part)
def plot_preview(H, W, triangles_x, triangles_y, boxes_x, boxes_y, vertices, probes):
    """Draw a labelled 2-D preview of blocks, triangles, vertices and probes,
    save it as preview.pdf, and show it without blocking."""
    plt.figure('Geometry Preview', figsize=(18, 9))
    # Blocks: dotted cyan outlines with their index at the centroid.
    for i in range(len(boxes_x)):
        # Hide blocks with a zero surface area as those will not be written to blockMeshDict either
        if (not np.array_equal(boxes_x[i, 0], boxes_x[i, 1])):
            x_center = np.mean(boxes_x[i, :])
            y_center = np.mean(boxes_y[i, :])
            plt.text(x_center, y_center, str(i), ha="center", va="center", fontsize=8)
            x = np.hstack((boxes_x[i, :], boxes_x[i, 0]))
            y = np.hstack((boxes_y[i, :], boxes_y[i, 0]))
            plt.plot(x, y, 'c:', zorder=7)
    # The five triangles: solid blue outlines, labelled at the centroid.
    for i in range(5):
        x_center = np.mean(triangles_x[i, :])
        y_center = np.mean(triangles_y[i, :])
        plt.text(x_center, y_center, str(i), ha="center", va="center", fontsize=10)
        x = np.hstack((triangles_x[i, :], triangles_x[i, 0]))
        y = np.hstack((triangles_y[i, :], triangles_y[i, 0]))
        plt.plot(x, y, 'b', zorder=8)
    # Vertices: scatter points annotated with their index.
    for i in range(len(vertices)):
        x = vertices[i, 0]
        y = vertices[i, 1]
        #s = str(i) + ' (' + str(i + len(vertices)) + ')'
        s = str(i)
        plt.annotate(s, xy=(x, y), xytext=(0, 12), textcoords='offset points', fontsize=10, ha="center", va="center")
        plt.scatter(x, y, zorder=10)
    # Probes: red stars. NOTE(review): the label string s is built but never
    # drawn (the annotate call was presumably removed at some point).
    for i in range(len(probes)):
        x = probes[i, 0]
        y = probes[i, 1]
        s = str(i) + ' (' + str(i + len(vertices)) + ')'
        plt.scatter(x, y, marker='*', color='r', s=50, zorder=10)
    # Channel outline and axis cosmetics.
    plt.plot([0, W, W, 0, 0], [0, 0, H, H, 0], zorder=9)
    plt.axis('equal')
    plt.axis([-1, W + 1, -1, H + 1])
    #plt.title('z = 0 (z = D)')
    plt.xlabel('x [mm]')
    plt.ylabel('y [mm]')
    plt.savefig('preview.pdf', format = 'pdf', bbox_inches='tight')
    plt.show(block=False)
def create_triangles(H, W, a, dx, dy, dx01, dy01, dx12, dy12):
    """Return two (5, 3) arrays holding the x and y corner coordinates of
    the five triangular obstacles, shifted by (dx, dy) and centred
    vertically in the channel of height H."""
    tri_height = (a ** 2 - 0.25 * a ** 2) ** 0.5
    xs = np.zeros((5, 3))
    ys = np.zeros((5, 3))
    # Triangle 0: tip at the origin, vertical base at x = tri_height.
    xs[0, :] = [0, tri_height, tri_height]
    ys[0, :] = [0, -a / 2, a / 2]
    # Triangles 1 and 2: staggered up and to the right of triangle 0.
    xs[1, :] = xs[0, :] + tri_height + dx01
    ys[1, :] = ys[0, :] + a / 2 + dy01
    xs[2, :] = xs[1, :] + tri_height + dx12
    ys[2, :] = ys[1, :] + a / 2 + dy12
    # Triangles 3 and 4: the mirrored stagger down and to the right.
    xs[3, :] = xs[0, :] + tri_height + dx01
    ys[3, :] = ys[0, :] - a / 2 - dy01
    xs[4, :] = xs[3, :] + tri_height + dx12
    ys[4, :] = ys[3, :] - a / 2 - dy12
    # Apply the global offset, then centre the cluster vertically.
    xs += dx
    ys += dy
    ys += H / 2
    return xs, ys
def create_vertices_new(H, W, tri_x, tri_y, a):
    """Build the array of unique block-corner vertices for the 55-box mesh.

    Vertices are generated column by column (eight x-stations from the
    inlet at x = 0 to the outlet at x = W), top (y = H) to bottom (y = 0)
    within each column, then duplicates are removed preserving order.

    Parameters:
        H, W: channel height and width.
        tri_x, tri_y: (5, 3) triangle corner coordinates from create_triangles().
        a: triangle leg length (kept for interface compatibility; unused).

    Returns:
        (n, 2) float array of unique (x, y) vertex coordinates, n <= 74.
    """
    # Vertical subdivision helpers for the wake-region columns.
    h = (tri_y[1, 1] - tri_y[3, 2]) / 4
    hh = (tri_y[2, 1] - tri_y[4, 2]) / 10
    bb = tri_y[4, 2]
    hhh = (tri_y[2, 2] - tri_y[4, 1]) / 14
    bbb = tri_y[4, 1]
    # (x, [y values top-to-bottom]) for each of the eight columns.
    columns = [
        (0, [H, tri_y[2, 0], tri_y[1, 0], tri_y[0, 0], tri_y[3, 0], tri_y[4, 0], 0]),
        (tri_x[0, 0], [H, tri_y[2, 0], tri_y[1, 0], tri_y[0, 0], tri_y[3, 0], tri_y[4, 0], 0]),
        (tri_x[0, 1], [H, tri_y[2, 0], tri_y[1, 0], tri_y[0, 2], tri_y[0, 1], tri_y[3, 0], tri_y[4, 0], 0]),
        (tri_x[1, 0], [H, tri_y[2, 0], tri_y[1, 0], tri_y[0, 2], tri_y[0, 1], tri_y[3, 0], tri_y[4, 0], 0]),
        (tri_x[1, 2], [H, tri_y[2, 0], tri_y[1, 2], tri_y[1, 1], tri_y[3, 2] + 3 * h, tri_y[3, 2] + 1 * h, tri_y[3, 2], tri_y[3, 1], tri_y[4, 0], 0]),
        (tri_x[2, 0], [H, tri_y[2, 0], tri_y[1, 2], tri_y[1, 1], tri_y[3, 2] + 3 * h, tri_y[3, 2] + 1 * h, tri_y[3, 2], tri_y[3, 1], tri_y[4, 0], 0]),
        (tri_x[2, 2], [H, tri_y[2, 2], tri_y[2, 1], bb + 9 * hh, bb + 7 * hh, bb + 6 * hh, bb + 4 * hh, bb + 3 * hh, bb + 1 * hh, tri_y[4, 2], tri_y[4, 1], 0]),
        (W, [H, tri_y[2, 2], bbb + 12 * hhh, bbb + 11 * hhh, bbb + 9 * hhh, bbb + 8 * hhh, bbb + 6 * hhh, bbb + 5 * hhh, bbb + 3 * hhh, bbb + 2 * hhh, tri_y[4, 1], 0]),
    ]
    vertices = np.array([[x, y] for x, ys in columns for y in ys])
    # Remove any doubly occurring vertices, keeping first occurrences.
    # Bug fix: the original adjusted indices with `j - len(delete)` while
    # deleting rows one at a time, which removes the wrong rows whenever
    # duplicates are not discovered in ascending index order (and when the
    # same row is recorded more than once). Collect the duplicate indices
    # as a set and delete them in a single np.delete call instead.
    duplicates = set()
    for i in range(len(vertices)):
        for j in range(i + 1, len(vertices)):
            if np.array_equal(vertices[i, :], vertices[j, :]):
                duplicates.add(j)
    if duplicates:
        vertices = np.delete(vertices, sorted(duplicates), 0)
    return vertices
def create_vertices(H, W, tri_x, tri_y):
    """Build the array of unique block-corner vertices for the older
    27-box mesh layout.

    Parameters:
        H, W: channel height and width.
        tri_x, tri_y: (5, 3) triangle corner coordinates from create_triangles().

    Returns:
        (n, 2) float array of unique (x, y) vertex coordinates, n <= 42.
    """
    # (x, y) pairs, column by column from the inlet (x = 0) to the outlet
    # (x = W), top (y = H) to bottom (y = 0) within each column.
    points = [
        (0, H), (0, tri_y[0, 0]), (0, 0),
        (tri_x[0, 0], H), (tri_x[0, 0], tri_y[0, 0]), (tri_x[0, 0], 0),
        (tri_x[0, 2], H), (tri_x[0, 2], tri_y[0, 2]), (tri_x[0, 1], tri_y[0, 1]), (tri_x[0, 1], 0),
        (tri_x[1, 0], H), (tri_x[1, 0], tri_y[1, 0]), (tri_x[3, 0], tri_y[3, 0]), (tri_x[3, 0], 0),
        (tri_x[1, 2], H), (tri_x[1, 2], tri_y[1, 2]), (tri_x[1, 1], tri_y[1, 1]), (tri_x[3, 2], tri_y[3, 2]), (tri_x[3, 1], tri_y[3, 1]), (tri_x[3, 1], 0),
        (tri_x[2, 0], H), (tri_x[2, 0], tri_y[2, 0]), (tri_x[2, 0], tri_y[1, 1]), (tri_x[4, 0], tri_y[3, 2]), (tri_x[4, 0], tri_y[4, 0]), (tri_x[4, 0], 0),
        (tri_x[2, 2], H), (tri_x[2, 2], tri_y[2, 2]), (tri_x[2, 1], tri_y[2, 1]), (tri_x[2, 1], tri_y[1, 1]), (tri_x[4, 2], tri_y[3, 2]), (tri_x[4, 2], tri_y[4, 2]), (tri_x[4, 1], tri_y[4, 1]), (tri_x[4, 1], 0),
        (W, H), (W, tri_y[2, 2]), (W, tri_y[2, 1]), (W, tri_y[1, 1]), (W, tri_y[3, 2]), (W, tri_y[4, 2]), (W, tri_y[4, 1]), (W, 0),
    ]
    vertices = np.array(points, dtype=float)
    # Remove any doubly occurring vertices, keeping first occurrences.
    # Bug fix: the original adjusted indices with `j - len(delete)` while
    # deleting rows one at a time, which removes the wrong rows whenever
    # duplicates are not discovered in ascending index order. Collect the
    # duplicate indices as a set and delete them in one call instead.
    duplicates = set()
    for i in range(len(vertices)):
        for j in range(i + 1, len(vertices)):
            if np.array_equal(vertices[i, :], vertices[j, :]):
                duplicates.add(j)
    if duplicates:
        vertices = np.delete(vertices, sorted(duplicates), 0)
    return vertices
def create_boxes_new(H, W, tri_x, tri_y, a):
    """Build the 55 quadrilateral boxes (blocks) tiling the channel around
    the five triangles.

    Each box i is given by four corners, counter-clockwise starting at the
    lower-left: (boxes_x[i, k], boxes_y[i, k]) for k = 0..3. Corner
    coordinates must coincide exactly with the vertices produced by
    create_vertices_new() so find_index() can resolve them later.

    Parameters:
        H, W: channel height and width.
        tri_x, tri_y: (5, 3) triangle corner coordinates.
        a: triangle leg length (unused here; kept for interface symmetry).

    Returns:
        (boxes_x, boxes_y): two (55, 4) arrays of corner coordinates.
    """
    boxes_x = np.zeros((55, 4))
    boxes_y = np.zeros((55, 4))
    # Top seven boxes
    boxes_x[0, :] = np.array([[0, tri_x[0, 0], tri_x[0, 0], 0]])
    boxes_y[0, :] = np.array([[tri_y[2, 0], tri_y[2, 0], H, H]])
    boxes_x[1, :] = np.array([[tri_x[0, 0], tri_x[0, 2], tri_x[0, 2], tri_x[0, 0]]])
    boxes_y[1, :] = np.array([[tri_y[2, 0], tri_y[2, 0], H, H]])
    boxes_x[2, :] = np.array([[tri_x[0, 2], tri_x[1, 0], tri_x[1, 0], tri_x[0, 2]]])
    boxes_y[2, :] = np.array([[tri_y[2, 0], tri_y[2, 0], H, H]])
    boxes_x[3, :] = np.array([[tri_x[1, 0], tri_x[1, 2], tri_x[1, 2], tri_x[1, 0]]])
    boxes_y[3, :] = np.array([[tri_y[2, 0], tri_y[2, 0], H, H]])
    boxes_x[4, :] = np.array([[tri_x[1, 2], tri_x[2, 0], tri_x[2, 0], tri_x[1, 2]]])
    boxes_y[4, :] = np.array([[tri_y[2, 0], tri_y[2, 0], H, H]])
    boxes_x[5, :] = np.array([[tri_x[2, 0], tri_x[2, 2], tri_x[2, 2], tri_x[2, 0]]])
    boxes_y[5, :] = np.array([[tri_y[2, 0], tri_y[2, 2], H, H]])
    boxes_x[6, :] = np.array([[tri_x[2, 2], W, W, tri_x[2, 2]]])
    boxes_y[6, :] = np.array([[tri_y[2, 2], tri_y[2, 2], H, H]])
    # Bottom seven boxes
    boxes_x[7, :] = np.array([[0, tri_x[0, 0], tri_x[0, 0], 0]])
    boxes_y[7, :] = np.array([[0, 0, tri_y[4, 0], tri_y[4, 0]]])
    boxes_x[8, :] = np.array([[tri_x[0, 0], tri_x[0, 1], tri_x[0, 1], tri_x[0, 0]]])
    boxes_y[8, :] = np.array([[0, 0, tri_y[4, 0], tri_y[4, 0]]])
    boxes_x[9, :] = np.array([[tri_x[0, 1], tri_x[3, 0], tri_x[3, 0], tri_x[0, 1]]])
    boxes_y[9, :] = np.array([[0, 0, tri_y[4, 0], tri_y[4, 0]]])
    boxes_x[10, :] = np.array([[tri_x[3, 0], tri_x[3, 1], tri_x[3, 1], tri_x[3, 0]]])
    boxes_y[10, :] = np.array([[0, 0, tri_y[4, 0], tri_y[4, 0]]])
    boxes_x[11, :] = np.array([[tri_x[3, 1], tri_x[4, 0], tri_x[4, 0], tri_x[3, 1]]])
    boxes_y[11, :] = np.array([[0, 0, tri_y[4, 0], tri_y[4, 0]]])
    boxes_x[12, :] = np.array([[tri_x[4, 0], tri_x[4, 1], tri_x[4, 1], tri_x[4, 0]]])
    boxes_y[12, :] = np.array([[0, 0, tri_y[4, 1], tri_y[4, 0]]])
    boxes_x[13, :] = np.array([[tri_x[4, 1], W, W, tri_x[4, 1]]])
    boxes_y[13, :] = np.array([[0, 0, tri_y[4, 1], tri_y[4, 1]]])
    # Leading 4 boxes
    boxes_x[14, :] = np.array([[0, tri_x[0, 0], tri_x[0, 0], 0]])
    boxes_y[14, :] = np.array([[tri_y[1, 0], tri_y[1, 0], tri_y[2, 0], tri_y[2, 0]]])
    boxes_x[15, :] = np.array([[0, tri_x[0, 0], tri_x[0, 0], 0]])
    boxes_y[15, :] = np.array([[tri_y[0, 0], tri_y[0, 0], tri_y[1, 0], tri_y[1, 0]]])
    boxes_x[16, :] = np.array([[0, tri_x[0, 0], tri_x[0, 0], 0]])
    boxes_y[16, :] = np.array([[tri_y[3, 0], tri_y[3, 0], tri_y[0, 0], tri_y[0, 0]]])
    boxes_x[17, :] = np.array([[0, tri_x[0, 0], tri_x[0, 0], 0]])
    boxes_y[17, :] = np.array([[tri_y[4, 0], tri_y[4, 0], tri_y[3, 0], tri_y[3, 0]]])
    # 4 boxes in the column of triangle 0
    boxes_x[18, :] = np.array([[tri_x[0, 0], tri_x[0, 2], tri_x[0, 2], tri_x[0, 0]]])
    boxes_y[18, :] = np.array([[tri_y[1, 0], tri_y[1, 0], tri_y[2, 0], tri_y[2, 0]]])
    boxes_x[19, :] = np.array([[tri_x[0, 0], tri_x[0, 2], tri_x[0, 2], tri_x[0, 0]]])
    boxes_y[19, :] = np.array([[tri_y[0, 0], tri_y[0, 2], tri_y[1, 0], tri_y[1, 0]]])
    boxes_x[20, :] = np.array([[tri_x[0, 0], tri_x[0, 1], tri_x[0, 1], tri_x[0, 0]]])
    boxes_y[20, :] = np.array([[tri_y[3, 0], tri_y[3, 0], tri_y[0, 1], tri_y[0, 0]]])
    boxes_x[21, :] = np.array([[tri_x[0, 0], tri_x[0, 1], tri_x[0, 1], tri_x[0, 0]]])
    boxes_y[21, :] = np.array([[tri_y[4, 0], tri_y[4, 0], tri_y[3, 0], tri_y[3, 0]]])
    # 5 boxes in the column between triangle 0 and traingle 1
    boxes_x[22, :] = np.array([[tri_x[0, 2], tri_x[1, 0], tri_x[1, 0], tri_x[0, 2]]])
    boxes_y[22, :] = np.array([[tri_y[1, 0], tri_y[1, 0], tri_y[2, 0], tri_y[2, 0]]])
    boxes_x[23, :] = np.array([[tri_x[0, 2], tri_x[1, 0], tri_x[1, 0], tri_x[0, 2]]])
    boxes_y[23, :] = np.array([[tri_y[0, 2], tri_y[0, 2], tri_y[1, 0], tri_y[1, 0]]])
    boxes_x[24, :] = np.array([[tri_x[0, 1], tri_x[3, 0], tri_x[1, 0], tri_x[0, 2]]])
    boxes_y[24, :] = np.array([[tri_y[0, 1], tri_y[0, 1], tri_y[0, 2], tri_y[0, 2]]])
    boxes_x[25, :] = np.array([[tri_x[0, 2], tri_x[3, 0], tri_x[3, 0], tri_x[0, 2]]])
    boxes_y[25, :] = np.array([[tri_y[3, 0], tri_y[3, 0], tri_y[0, 1], tri_y[0, 1]]])
    boxes_x[26, :] = np.array([[tri_x[0, 2], tri_x[3, 0], tri_x[3, 0], tri_x[0, 2]]])
    boxes_y[26, :] = np.array([[tri_y[4, 0], tri_y[4, 0], tri_y[3, 0], tri_y[3, 0]]])
    # 5 boxes in the column of triangle 1
    # h subdivides the gap between triangles 1 and 3 into quarters.
    h = (tri_y[1, 1] - tri_y[3, 2]) / 4
    boxes_x[27, :] = np.array([[tri_x[1, 0], tri_x[1, 2], tri_x[1, 2], tri_x[1, 0]]])
    boxes_y[27, :] = np.array([[tri_y[1, 0], tri_y[1, 2], tri_y[2, 0], tri_y[2, 0]]])
    boxes_x[28, :] = np.array([[tri_x[1, 0], tri_x[1, 2], tri_x[1, 2], tri_x[1, 0]]])
    boxes_y[28, :] = np.array([[tri_y[0, 2], tri_y[3, 2] + 3 * h, tri_y[1, 1], tri_y[1, 0]]])
    boxes_x[29, :] = np.array([[tri_x[1, 0], tri_x[1, 2], tri_x[1, 2], tri_x[1, 0]]])
    boxes_y[29, :] = np.array([[tri_y[0, 1], tri_y[3, 2] + 1 * h, tri_y[3, 2] + 3 * h, tri_y[0, 2]]])
    boxes_x[30, :] = np.array([[tri_x[1, 0], tri_x[1, 2], tri_x[1, 2], tri_x[1, 0]]])
    boxes_y[30, :] = np.array([[tri_y[3, 0], tri_y[3, 2], tri_y[3, 2] + 1 * h, tri_y[0, 1]]])
    boxes_x[31, :] = np.array([[tri_x[1, 0], tri_x[1, 2], tri_x[1, 2], tri_x[1, 0]]])
    boxes_y[31, :] = np.array([[tri_y[4, 0], tri_y[4, 0], tri_y[3, 1], tri_y[3, 0]]])
    # 7 boxes in the column between triangle 1 and triangle 2
    boxes_x[32, :] = np.array([[tri_x[1, 2], tri_x[2, 0], tri_x[2, 0], tri_x[1, 2]]])
    boxes_y[32, :] = np.array([[tri_y[1, 2], tri_y[1, 2], tri_y[2, 0], tri_y[2, 0]]])
    boxes_x[33, :] = np.array([[tri_x[1, 2], tri_x[2, 0], tri_x[2, 0], tri_x[1, 2]]])
    boxes_y[33, :] = np.array([[tri_y[1, 1], tri_y[1, 1], tri_y[1, 2], tri_y[1, 2]]])
    boxes_x[34, :] = np.array([[tri_x[1, 2], tri_x[2, 0], tri_x[2, 0], tri_x[1, 2]]])
    boxes_y[34, :] = np.array([[tri_y[3, 2] + 3 * h, tri_y[3, 2] + 3 * h, tri_y[1, 1], tri_y[1, 1]]])
    boxes_x[35, :] = np.array([[tri_x[1, 2], tri_x[2, 0], tri_x[2, 0], tri_x[1, 2]]])
    boxes_y[35, :] = np.array([[tri_y[3, 2] + 1 * h, tri_y[3, 2] + 1 * h, tri_y[3, 2] + 3 * h, tri_y[3, 2] + 3 * h]])
    boxes_x[36, :] = np.array([[tri_x[1, 2], tri_x[2, 0], tri_x[2, 0], tri_x[1, 2]]])
    boxes_y[36, :] = np.array([[tri_y[3, 2], tri_y[3, 2], tri_y[3, 2] + 1 * h, tri_y[3, 2] + 1 * h]])
    boxes_x[37, :] = np.array([[tri_x[1, 2], tri_x[2, 0], tri_x[2, 0], tri_x[1, 2]]])
    boxes_y[37, :] = np.array([[tri_y[3, 1], tri_y[3, 1], tri_y[3, 2], tri_y[3, 2]]])
    boxes_x[38, :] = np.array([[tri_x[1, 2], tri_x[2, 0], tri_x[2, 0], tri_x[1, 2]]])
    boxes_y[38, :] = np.array([[tri_y[4, 0], tri_y[4, 0], tri_y[3, 1], tri_y[3, 1]]])
    # 7 boxes in the column of triangle 2
    # hh subdivides the gap between triangles 2 and 4 into tenths.
    hh = (tri_y[2, 1] - tri_y[4, 2]) / 10
    bb = tri_y[4, 2]
    boxes_x[39, :] = np.array([[tri_x[2, 0], tri_x[2, 1], tri_x[2, 1], tri_x[2, 0]]])
    boxes_y[39, :] = np.array([[tri_y[1, 2], bb + 9 * hh, tri_y[2, 1], tri_y[2, 0]]])
    boxes_x[40, :] = np.array([[tri_x[2, 0], tri_x[2, 1], tri_x[2, 1], tri_x[2, 0]]])
    boxes_y[40, :] = np.array([[tri_y[1, 1], bb + 7 * hh, bb + 9 * hh, tri_y[1, 2]]])
    boxes_x[41, :] = np.array([[tri_x[2, 0], tri_x[2, 1], tri_x[2, 1], tri_x[2, 0]]])
    boxes_y[41, :] = np.array([[tri_y[3, 2] + 3 * h, bb + 6 * hh, bb + 7 * hh, tri_y[1, 1]]])
    boxes_x[42, :] = np.array([[tri_x[2, 0], tri_x[2, 1], tri_x[2, 1], tri_x[2, 0]]])
    boxes_y[42, :] = np.array([[tri_y[3, 2] + 1 * h, bb + 4 * hh, bb + 6 * hh, tri_y[3, 2] + 3 * h]])
    boxes_x[43, :] = np.array([[tri_x[2, 0], tri_x[2, 1], tri_x[2, 1], tri_x[2, 0]]])
    boxes_y[43, :] = np.array([[tri_y[3, 2], bb + 3 * hh, bb + 4 * hh, tri_y[3, 2] + 1 * h]])
    boxes_x[44, :] = np.array([[tri_x[2, 0], tri_x[2, 1], tri_x[2, 1], tri_x[2, 0]]])
    boxes_y[44, :] = np.array([[tri_y[3, 1], bb + 1 * hh, bb + 3 * hh, tri_y[3, 2]]])
    boxes_x[45, :] = np.array([[tri_x[2, 0], tri_x[2, 1], tri_x[2, 1], tri_x[2, 0]]])
    boxes_y[45, :] = np.array([[tri_y[4, 0], tri_y[4, 2], bb + 1 * hh, tri_y[3, 1]]])
    # Trailing 9 boxes
    # hhh subdivides the outlet-side vertical span into fourteenths.
    hh = (tri_y[2, 1] - tri_y[4, 2]) / 10
    bb = tri_y[4, 2]
    hhh = (tri_y[2, 2] - tri_y[4, 1]) / 14
    bbb = tri_y[4, 1]
    boxes_x[46, :] = np.array([[tri_x[2, 1], W, W, tri_x[2, 2]]])
    boxes_y[46, :] = np.array([[tri_y[2, 1], bbb + 12 * hhh, tri_y[2, 2], tri_y[2, 2]]])
    boxes_x[47, :] = np.array([[tri_x[2, 1], W, W, tri_x[2, 2]]])
    boxes_y[47, :] = np.array([[bb + 9 * hh, bbb + 11 * hhh, bbb + 12 * hhh, tri_y[2, 1]]])
    boxes_x[48, :] = np.array([[tri_x[2, 1], W, W, tri_x[2, 1]]])
    boxes_y[48, :] = np.array([[bb + 7 * hh, bbb + 9 * hhh, bbb + 11 * hhh, bb + 9 * hh]])
    boxes_x[49, :] = np.array([[tri_x[2, 1], W, W, tri_x[2, 1]]])
    boxes_y[49, :] = np.array([[bb + 6 * hh, bbb + 8 * hhh, bbb + 9 * hhh, bb + 7 * hh]])
    boxes_x[50, :] = np.array([[tri_x[4, 2], W, W, tri_x[2, 1]]])
    boxes_y[50, :] = np.array([[bb + 4 * hh, bbb + 6 * hhh, bbb + 8 * hhh, bb + 6 * hh]])
    boxes_x[51, :] = np.array([[tri_x[4, 2], W, W, tri_x[4, 2]]])
    boxes_y[51, :] = np.array([[bb + 3 * hh, bbb + 5 * hhh, bbb + 6 * hhh, bb + 4 * hh]])
    boxes_x[52, :] = np.array([[tri_x[4, 2], W, W, tri_x[4, 2]]])
    boxes_y[52, :] = np.array([[bb + 1 * hh, bbb + 3 * hhh, bbb + 5 * hhh, bb + 3 * hh]])
    boxes_x[53, :] = np.array([[tri_x[4, 1], W, W, tri_x[4, 2]]])
    boxes_y[53, :] = np.array([[tri_y[4, 2], bbb + 2 * hhh, bbb + 3 * hhh, bb + 1 * hh]])
    boxes_x[54, :] = np.array([[tri_x[4, 1], W, W, tri_x[4, 2]]])
    boxes_y[54, :] = np.array([[tri_y[4, 1], tri_y[4, 1], bbb + 2 * hhh, tri_y[4, 2]]])
    # Earlier variant of the trailing boxes, kept for reference.
    # boxes_x[46, :] = np.array([[tri_x[2, 1], W, W, tri_x[2, 2]]])
    # boxes_y[46, :] = np.array([[tri_y[2, 1], tri_y[2, 1], tri_y[2, 2], tri_y[2, 2]]])
    # boxes_x[47, :] = np.array([[tri_x[2, 1], W, W, tri_x[2, 2]]])
    # boxes_y[47, :] = np.array([[tri_y[1, 2] - a/2, tri_y[1, 2] - a/2, tri_y[2, 1], tri_y[2, 1]]])
    # boxes_x[48, :] = np.array([[tri_x[2, 1], W, W, tri_x[2, 1]]])
    # boxes_y[48, :] = np.array([[tri_y[1, 1] + a/10, tri_y[1, 1] + a/10, tri_y[1, 2] - a/2, tri_y[1, 2] - a/2]])
    # boxes_x[49, :] = np.array([[tri_x[2, 1], W, W, tri_x[2, 1]]])
    # boxes_y[49, :] = np.array([[tri_y[0, 2] - a/3, tri_y[0, 2] - a/3, tri_y[1, 1] + a/10, tri_y[1, 1] + a/10]])
    # boxes_x[50, :] = np.array([[tri_x[4, 2], W, W, tri_x[2, 1]]])
    # boxes_y[50, :] = np.array([[tri_y[0, 1] + a/3, tri_y[0, 1] + a/3, tri_y[0, 2] - a/3, tri_y[0, 2] - a/3]])
    # boxes_x[51, :] = np.array([[tri_x[4, 2], W, W, tri_x[4, 2]]])
    # boxes_y[51, :] = np.array([[tri_y[3, 2] - a/10, tri_y[3, 2] - a/10, tri_y[0, 1] + a/3, tri_y[0, 1] + a/3]])
    # boxes_x[52, :] = np.array([[tri_x[4, 2], W, W, tri_x[4, 2]]])
    # boxes_y[52, :] = np.array([[tri_y[3, 1] + a/2, tri_y[3, 1] + a/2, tri_y[3, 2] - a/10, tri_y[3, 2] - a/10]])
    # boxes_x[53, :] = np.array([[tri_x[4, 1], W, W, tri_x[4, 2]]])
    # boxes_y[53, :] = np.array([[tri_y[4, 2], tri_y[4, 2], tri_y[3, 1] + a/2, tri_y[3, 1] + a/2]])
    # boxes_x[54, :] = np.array([[tri_x[4, 1], W, W, tri_x[4, 2]]])
    # boxes_y[54, :] = np.array([[tri_y[4, 1], tri_y[4, 1], tri_y[4, 2], tri_y[4, 2]]])
    return boxes_x, boxes_y
def create_boxes(H, W, tri_x, tri_y):
    """Build corner coordinates for the 27 quadrilateral mesh blocks
    that tile the domain around the five triangles.

    Parameters
    ----------
    H, W : float
        Domain height and width.
    tri_x, tri_y : ndarray
        (5, 3) arrays holding the x / y coordinates of the three corners
        of each of the five triangles.

    Returns
    -------
    (boxes_x, boxes_y) : two (27, 4) ndarrays with, per box, the x and y
        coordinates of its four corners.  For box 0 the order is
        bottom-left, bottom-right, top-right, top-left; presumably the
        same winding holds for all boxes -- TODO confirm.
    """
    boxes_x = np.zeros((27, 4))
    boxes_y = np.zeros((27, 4))
    # Top seven boxes (boxes 0-6: strip between the triangles and y = H)
    boxes_x[0, :] = np.array([[0, tri_x[0, 0], tri_x[0, 0], 0]])
    boxes_y[0, :] = np.array([[tri_y[0, 0], tri_y[0, 0], H, H]])
    boxes_x[1, :] = np.array([[tri_x[0, 0], tri_x[0, 2], tri_x[0, 2], tri_x[0, 0]]])
    boxes_y[1, :] = np.array([[tri_y[0, 0], tri_y[0, 2], H, H]])
    boxes_x[2, :] = np.array([[tri_x[0, 2], tri_x[1, 0], tri_x[1, 0], tri_x[0, 2]]])
    boxes_y[2, :] = np.array([[tri_y[0, 2], tri_y[1, 0], H, H]])
    boxes_x[3, :] = np.array([[tri_x[1, 0], tri_x[1, 2], tri_x[1, 2], tri_x[1, 0]]])
    boxes_y[3, :] = np.array([[tri_y[1, 0], tri_y[1, 2], H, H]])
    boxes_x[4, :] = np.array([[tri_x[1, 2], tri_x[2, 0], tri_x[2, 0], tri_x[1, 2]]])
    boxes_y[4, :] = np.array([[tri_y[1, 2], tri_y[2, 0], H, H]])
    boxes_x[5, :] = np.array([[tri_x[2, 0], tri_x[2, 2], tri_x[2, 2], tri_x[2, 0]]])
    boxes_y[5, :] = np.array([[tri_y[2, 0], tri_y[2, 2], H, H]])
    boxes_x[6, :] = np.array([[tri_x[2, 2], W, W, tri_x[2, 2]]])
    boxes_y[6, :] = np.array([[tri_y[2, 2], tri_y[2, 2], H, H]])
    # Bottom seven boxes (boxes 7-13: strip between y = 0 and the triangles)
    boxes_x[7, :] = np.array([[0, tri_x[0, 0], tri_x[0, 0], 0]])
    boxes_y[7, :] = np.array([[0, 0, tri_y[0, 0], tri_y[0, 0]]])
    boxes_x[8, :] = np.array([[tri_x[0, 0], tri_x[0, 1], tri_x[0, 1], tri_x[0, 0]]])
    boxes_y[8, :] = np.array([[0, 0, tri_y[0, 1], tri_y[0, 0]]])
    boxes_x[9, :] = np.array([[tri_x[0, 1], tri_x[3, 0], tri_x[3, 0], tri_x[0, 1]]])
    boxes_y[9, :] = np.array([[0, 0, tri_y[3, 0], tri_y[0, 1]]])
    boxes_x[10, :] = np.array([[tri_x[3, 0], tri_x[3, 1], tri_x[3, 1], tri_x[3, 0]]])
    boxes_y[10, :] = np.array([[0, 0, tri_y[3, 1], tri_y[3, 0]]])
    boxes_x[11, :] = np.array([[tri_x[3, 1], tri_x[4, 0], tri_x[4, 0], tri_x[3, 1]]])
    boxes_y[11, :] = np.array([[0, 0, tri_y[4, 0], tri_y[3, 1]]])
    boxes_x[12, :] = np.array([[tri_x[4, 0], tri_x[4, 1], tri_x[4, 1], tri_x[4, 0]]])
    boxes_y[12, :] = np.array([[0, 0, tri_y[4, 1], tri_y[4, 0]]])
    boxes_x[13, :] = np.array([[tri_x[4, 1], W, W, tri_x[4, 1]]])
    boxes_y[13, :] = np.array([[0, 0, tri_y[4, 1], tri_y[4, 1]]])
    # The 8 boxes enclosed by the 5 triangles (boxes 14-21)
    boxes_x[14, :] = np.array([[tri_x[0, 1], tri_x[3, 0], tri_x[1, 0], tri_x[0, 2]]])
    boxes_y[14, :] = np.array([[tri_y[0, 1], tri_y[3, 0], tri_y[1, 0], tri_y[0, 2]]])
    boxes_x[15, :] = np.array([[tri_x[3, 0], tri_x[3, 2], tri_x[1, 1], tri_x[1, 0]]])
    boxes_y[15, :] = np.array([[tri_y[3, 0], tri_y[3, 2], tri_y[1, 1], tri_y[1, 0]]])
    boxes_x[16, :] = np.array([[tri_x[1, 1], tri_x[2, 0], tri_x[2, 0], tri_x[1, 2]]])
    boxes_y[16, :] = np.array([[tri_y[1, 1], tri_y[1, 1], tri_y[2, 0], tri_y[1, 2]]])
    boxes_x[17, :] = np.array([[tri_x[3, 2], tri_x[4, 0], tri_x[2, 0], tri_x[1, 1]]])
    boxes_y[17, :] = np.array([[tri_y[3, 2], tri_y[3, 2], tri_y[1, 1], tri_y[1, 1]]])
    boxes_x[18, :] = np.array([[tri_x[3, 1], tri_x[4, 0], tri_x[4, 0], tri_x[3, 2]]])
    boxes_y[18, :] = np.array([[tri_y[3, 1], tri_y[4, 0], tri_y[3, 2], tri_y[3, 2]]])
    boxes_x[19, :] = np.array([[tri_x[2, 0], tri_x[2, 1], tri_x[2, 1], tri_x[2, 0]]])
    boxes_y[19, :] = np.array([[tri_y[1, 1], tri_y[1, 1], tri_y[2, 1], tri_y[2, 0]]])
    boxes_x[20, :] = np.array([[tri_x[4, 0], tri_x[4, 2], tri_x[2, 1], tri_x[2, 0]]])
    boxes_y[20, :] = np.array([[tri_y[3, 2], tri_y[3, 2], tri_y[1, 1], tri_y[1, 1]]])
    boxes_x[21, :] = np.array([[tri_x[4, 0], tri_x[4, 2], tri_x[4, 2], tri_x[4, 0]]])
    boxes_y[21, :] = np.array([[tri_y[4, 0], tri_y[4, 2], tri_y[3, 2], tri_y[3, 2]]])
    # Trailing 5 boxes (boxes 22-26: strip between the triangles and x = W)
    boxes_x[22, :] = np.array([[tri_x[2, 1], W, W, tri_x[2, 2]]])
    boxes_y[22, :] = np.array([[tri_y[2, 1], tri_y[2, 1], tri_y[2, 2], tri_y[2, 2]]])
    boxes_x[23, :] = np.array([[tri_x[2, 1], W, W, tri_x[2, 1]]])
    boxes_y[23, :] = np.array([[tri_y[1, 1], tri_y[1, 1], tri_y[2, 1], tri_y[2, 1]]])
    boxes_x[24, :] = np.array([[tri_x[4, 2], W, W, tri_x[2, 1]]])
    boxes_y[24, :] = np.array([[tri_y[3, 2], tri_y[3, 2], tri_y[1, 1], tri_y[1, 1]]])
    boxes_x[25, :] = np.array([[tri_x[4, 2], W, W, tri_x[4, 2]]])
    boxes_y[25, :] = np.array([[tri_y[4, 2], tri_y[4, 2], tri_y[3, 2], tri_y[3, 2]]])
    boxes_x[26, :] = np.array([[tri_x[4, 1], W, W, tri_x[4, 2]]])
    boxes_y[26, :] = np.array([[tri_y[4, 1], tri_y[4, 1], tri_y[4, 2], tri_y[4, 2]]])
    return boxes_x, boxes_y
# Global variables
# Boilerplate OpenFOAM dictionary banner + FoamFile block written at the
# top of the generated blockMeshDict.
header = """/*--------------------------------*- C++ -*----------------------------------*\\
| ========= | |
| \\\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\\ / O peration | Version: 2.4.0 |
| \\\ / A nd | Web: www.OpenFOAM.org |
| \\\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
object blockMeshDict;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
"""
# Closing comment line appended to every generated OpenFOAM file.
footer = '\n// ************************************************************************* //'
initial_p = """/*--------------------------------*- C++ -*----------------------------------*\\
| ========= | |
| \\\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\\ / O peration | Version: 2.4.0 |
| \\\ / A nd | Web: www.OpenFOAM.org |
| \\\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
object p;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
dimensions [0 2 -2 0 0 0 0];
internalField uniform 0;
boundaryField
{
frontAndBack
{
type zeroGradient;
}
triangleWalls
{
type zeroGradient;
}
outsideWalls
{
type zeroGradient;
}
inlet
{
type zeroGradient;
}
outlet
{
type zeroGradient;
}
}
// ************************************************************************* //
"""
initial_U = """/*--------------------------------*- C++ -*----------------------------------*\\
| ========= | |
| \\\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\\ / O peration | Version: 2.4.0 |
| \\\ / A nd | Web: www.OpenFOAM.org |
| \\\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class volVectorField;
object U;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
dimensions [0 1 -1 0 0 0 0];
internalField uniform (0.00001 0 0);
boundaryField
{
frontAndBack
{
type fixedValue;
value uniform (0 0 0);
}
triangleWalls
{
type fixedValue;
value uniform (0 0 0);
}
outsideWalls
{
type fixedValue;
value uniform (0 0 0);
}
inlet
{
type turbulentInlet;
referenceField uniform (""" + str(V_in) + """ 0 0);
fluctuationScale (""" + str(TI) + """ """ + str(TI) + """ """ + str(TI) + """);
value uniform (""" + str(V_in) + """ 0 0);
}
outlet
{
type inletOutlet;
inletValue uniform (0.00001 0 0);
value uniform (0 0 0);
}
}
// ************************************************************************* //
"""
initial_k = """/*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 2.4.0 |
| \\ / A nd | Web: www.OpenFOAM.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
location "0";
object k;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
dimensions [0 2 -2 0 0 0 0];
internalField uniform 0;
boundaryField
{
frontAndBack
{
type fixedValue;
value uniform 0;
}
triangleWalls
{
type fixedValue;
value uniform 0;
}
outsideWalls
{
type fixedValue;
value uniform 0;
}
inlet
{
type fixedValue;
value uniform 2e-05;
}
outlet
{
type inletOutlet;
value uniform 0;
inletValue uniform 0;
}
}
// ************************************************************************* //
"""
initial_nuSgs = """/*--------------------------------*- C++ -*----------------------------------*\\
| ========= | |
| \\\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\\ / O peration | Version: 2.4.0 |
| \\\ / A nd | Web: www.OpenFOAM.org |
| \\\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
object nuSgs;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
dimensions [0 2 -1 0 0 0 0];
internalField uniform 0;
boundaryField
{
frontAndBack
{
type zeroGradient;
}
triangleWalls
{
type zeroGradient;
}
outsideWalls
{
type zeroGradient;
}
inlet
{
type zeroGradient;
}
outlet
{
type zeroGradient;
}
}
// ************************************************************************* //
"""
initial_nuTilda = """/*--------------------------------*- C++ -*----------------------------------*\\
| ========= | |
| \\\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\\ / O peration | Version: 2.4.0 |
| \\\ / A nd | Web: www.OpenFOAM.org |
| \\\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
object nuTilda;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
dimensions [0 2 -1 0 0 0 0];
internalField uniform 0;
boundaryField
{
frontAndBack
{
type fixedValue;
value uniform 0;
}
triangleWalls
{
type fixedValue;
value uniform 0;
}
outsideWalls
{
type fixedValue;
value uniform 0;
}
inlet
{
type fixedValue;
value uniform 0;
}
outlet
{
type inletOutlet;
inletValue uniform 0;
value uniform 0;
}
}
// ************************************************************************* //
"""
# First half of the system/controlDict template; probe coordinates are
# inserted between controlDict1 and controlDict2 by the generator.
# BUG FIX: the adaptive-timestep keyword is 'maxCo' (lowercase m) in
# OpenFOAM; the previous 'MaxCo' was not recognised, so the Courant-number
# limit silently never applied.
controlDict1 = """/*--------------------------------*- C++ -*----------------------------------*\\
| ========= | |
| \\\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\\ / O peration | Version: 2.4.0 |
| \\\ / A nd | Web: www.OpenFOAM.org |
| \\\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class dictionary;
location "system";
object controlDict;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
application pimpleFoam;
startFrom startTime;
startTime 0;
stopAt endTime;
endTime 4;
deltaT 0.00025;
writeControl adjustableRunTime;
writeInterval 0.1;
purgeWrite 0;
writeFormat ascii;
writePrecision 6;
writeCompression off;
timeFormat general;
timePrecision 6;
runTimeModifiable true;
adjustTimeStep true;
maxCo 0.95;
functions
{
fieldAverage1
{
type fieldAverage;
functionObjectLibs ("libfieldFunctionObjects.so");
enabled true;
cleanRestart false;
outputControl outputTime;
resetOnOutput false;
timeStart 0;
fields
(
U
{
mean on;
prime2Mean on;
base time;
}
);
}
probes
{
functionObjectLibs ("libsampling.so");
type probes;
name probes;
outputControl timeStep;
outputInterval 1;
fields
(
p U UMean
);
probeLocations
(
"""
# Second half of the controlDict template: closes the probeLocations list
# opened at the end of controlDict1 and terminates the functions block.
controlDict2 = """ );
}
}
// ************************************************************************* //
"""
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this chunk -- it must be
    # declared earlier in this module; confirm.
    main()
|
from taiga.requestmaker import RequestMaker
from taiga.models import Severity, Severities
import unittest
from mock import patch
class TestSeverities(unittest.TestCase):
    """Unit test for the taiga Severities list resource."""

    @patch('taiga.models.base.ListResource._new_resource')
    def test_create_severity(self, mock_new_resource):
        # Fake credentials; the patched _new_resource prevents real HTTP calls.
        rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
        mock_new_resource.return_value = Severity(rm)
        sv = Severities(rm).create(1, 'SV 1')
        # create() must forward the project id and name as the payload.
        mock_new_resource.assert_called_with(
            payload={'project': 1, 'name': 'SV 1'}
        )
|
"""This module visualizes the data by Borough and over all of NYC."""
#author: Matthew Dunn
#netID: mtd368
#date: 12/12/2015
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class retaurantGradeAnalyzer(object):
    """Plot NYC restaurant inspection grades over time, city-wide and per borough.

    Parameters
    ----------
    allrestaurantsData : pandas.DataFrame
        Inspection records; must contain 'BORO', 'GRADE' and 'GRADE DATE'
        columns.
    listofBoros : list of str
        Borough names to plot individually.
    """

    def __init__(self, allrestaurantsData, listofBoros):
        # Keep only the needed columns; .copy() avoids mutating a view of the
        # caller's frame and silences pandas' SettingWithCopyWarning when the
        # 'Counter' column is added below.
        self.allrestaurantsData = allrestaurantsData[['BORO', 'GRADE', 'GRADE DATE']].copy()
        self.allrestaurantsData['Counter'] = 1
        self.listofBoros = listofBoros

    def restsbygradeovertime(self):
        """Plot quarterly grade counts for all of NYC and save the figure as PDF."""
        restaurantsovertime = self.allrestaurantsData
        restaurantsovertime1 = restaurantsovertime.groupby(['GRADE DATE', 'GRADE']).size().unstack()
        # NOTE(review): .resample('Q') with no aggregation relies on older
        # pandas behaviour; newer pandas returns a Resampler here -- confirm
        # the pinned pandas version.
        restaurantsovertime1 = restaurantsovertime1.resample('Q')
        restaurantsovertime1.plot(kind='line', by=['GRADE DATE', 'GRADE'])
        # Typo fix in the user-visible title ("Imporvement" -> "Improvement").
        plt.title('NYC Restaurant Grade Improvement')
        plt.savefig('figures/nyc_grade_improvement_ny.pdf', format='pdf')

    def createsingleborotoplot(self):
        """Plot quarterly grade counts per borough, saving one PDF per borough."""
        for boro in self.listofBoros:
            restaurantsovertime = self.allrestaurantsData[self.allrestaurantsData['BORO'] == boro]
            restaurantsovertime1 = restaurantsovertime.groupby(['GRADE DATE', 'GRADE']).size().unstack()
            restaurantsovertime1 = restaurantsovertime1.resample('Q')
            restaurantsovertime1.plot(kind='line', by=['GRADE DATE', 'GRADE'])
            # Typo fix in the user-visible title ("Imporvment" -> "Improvement").
            plt.title('Restaurant Grade Improvement for ' + boro)
            plt.savefig('figures/boro_grade_improvement_' + boro + '.pdf', format='pdf')
|
"""Helpers stuff"""
import yaml
# Default scaffold dumped into a fresh flaskbox.yml: one 'application'
# entry plus an example 'users' route whose fields map field names to
# flaskbox type names (string / array_str / array_int / datetime).
base_configuration = [
    {
        'application': {
            'name': 'Flaskbox API',
        }
    },
    {
        'route': {
            'name': 'users',
            'fields': [
                {'name': 'string'},
                {'last_name': 'string'},
                {'users': 'array_str'},
                {'ids': 'array_int'},
                {'created_at': 'datetime'}
            ]
        }
    }
]
def _create_yml_file(file_name: str = 'flaskbox.yml'):
    """Serialize the base configuration to *file_name* as block-style YAML.

    :param file_name: Name for a file.
    """
    with open(file_name, 'w') as stream:
        yaml.dump(base_configuration, stream, default_flow_style=False)
def create_init_file(file_name: str = 'flaskbox.yml'):
    """Create the flaskbox YAML configuration file.

    :param file_name: Target file name.  Defaults to 'flaskbox.yml' so
        existing callers are unaffected; the parameter generalizes the
        helper to arbitrary destinations (e.g. for tests).
    """
    _create_yml_file(file_name)
|
from django.conf.urls import url
from . import views
# BUG FIX: Django matches URL regexes with re.search, so the previously
# unanchored patterns matched *anywhere* in the request path (e.g. any
# path merely containing 'users/id').  Each pattern is now anchored with
# '^'.  A trailing '$' would be stricter still, but could reject
# trailing-slash URLs existing clients may rely on -- add it only after
# verifying callers.
urlpatterns = [
    url(r'^users/last_synced', views.UserLastSyncedItemview.as_view(), name="User Last sync time"),
    url(r'^users/have_tokens', views.HaveTokens.as_view(), name="have_tokens"),
    url(r'^users/userrequestbackfill', views.UserBackfillRequestView.as_view(), name="user_backfill_request"),
    url(r'^users/aa_custom_ranges', views.AACustomRangesView.as_view(), name="user_aa_custom_ranges"),
    url(r'^users/id', views.get_user_id, name="user_id"),
]
from time import sleep
import random
import os
import time
import sys
import json
import re
from urllib import request, parse
import name_get
import Chrome_driver
import email_imap as imap
import re
from pyrobot import Robot
import Submit_handle
from selenium.webdriver.support.ui import Select
'''
Adsmain health
Auto
'''
def web_submit(submit, chrome_driver, debug=0):
    """Fill and submit the trustedhealthquotes.com health-quote form.

    Parameters
    ----------
    submit : dict
        Must contain 'Site' (landing URL) and an 'Auto' dict with the
        identity fields used below (zip, dateofbirth, firstname, ...).
    chrome_driver : selenium Chrome webdriver
        Already-started driver instance; it is closed before returning.
    debug : int
        When 1, a hard-coded test URL replaces submit['Site'].

    Returns
    -------
    int
        1 when the form was (apparently) submitted, 0 otherwise.
    """
    if debug == 1:
        site = 'http://im.datingwithlili.com/im/click.php?c=8&key=0jp93r1877b94stq2u8rd6hd'
        submit['Site'] = site
    else:
        # BUG FIX: 'site' was previously assigned only in the debug branch,
        # so the retry loop below raised NameError on normal runs.
        site = submit['Site']
    print('===========================')
    chrome_driver.get(submit['Site'])
    # NOTE(review): 3000-second sleep looks like a debugging leftover -- confirm.
    sleep(3000)
    flag = 0
    i = 0
    # Retry up to 4 times until the driver lands on the target domain.
    while i <= 3:
        if 'trustedhealthquotes.com' in chrome_driver.current_url:
            break
        else:
            writelog(chrome_driver.current_url)
            chrome_driver.get(site)
            sleep(5)
            i = i + 1
    try:
        if 'trustedhealthquotes.com' in chrome_driver.current_url:
            # Landing page: enter ZIP and start the quote funnel.
            chrome_driver.find_element_by_xpath('//*[@id="address_1_zip"]').send_keys(submit['Auto']['zip'])
            chrome_driver.find_element_by_xpath('//*[@id="quickform-submit"]').click()
            handles = chrome_driver.window_handles
            if len(handles) == 2:
                # The form opened in a second window -- switch to it.
                chrome_driver.switch_to.window(handles[1])
            a = [
                '//*[@id="insured_1_gender_male"]',
                '//*[@id="insured_1_gender_female"]'
            ]
            num = random.randint(0, 1)  # pick a gender at random
            chrome_driver.find_element_by_xpath(a[num]).click()
            birthday = Submit_handle.get_auto_birthday(submit['Auto']['dateofbirth'])
            month = birthday[0]
            day = birthday[1]
            year = birthday[2]
            chrome_driver.find_element_by_xpath('//*[@id="insured_1_dobMM"]').send_keys(month)
            chrome_driver.find_element_by_xpath('//*[@id="insured_1_dobDD"]').send_keys(day)
            chrome_driver.find_element_by_xpath('//*[@id="insured_1_dobYYYY"]').send_keys(year)
            num_info = Submit_handle.get_height_info()
            chrome_driver.find_element_by_xpath('//*[@id="insured_1_heightFT"]').send_keys(num_info['Height_FT'])
            chrome_driver.find_element_by_xpath('//*[@id="insured_1_heightIN"]').send_keys(num_info['Height_Inch'])
            chrome_driver.find_element_by_xpath('//*[@id="insured_1_weight"]').send_keys(num_info['Weight'])
            chrome_driver.find_element_by_xpath('//*[@id="first_name"]').send_keys(submit['Auto']['firstname'])
            chrome_driver.find_element_by_xpath('//*[@id="last_name"]').send_keys(submit['Auto']['lastname'])
            chrome_driver.find_element_by_xpath('//*[@id="address_1_street1"]').send_keys(submit['Auto']['address'])
            chrome_driver.find_element_by_xpath('//*[@id="address_1_city"]').send_keys(submit['Auto']['city'])
            s1 = Select(chrome_driver.find_element_by_xpath('//*[@id="address_1_state"]'))
            s1.select_by_value(str(submit['Auto']['state']))  # select the option whose value matches the state
            homephone = submit['Auto']['homephone'].split('.')[0]
            chrome_driver.find_element_by_xpath('//*[@id="phone1"]').send_keys(str(homephone)[0:3])
            chrome_driver.find_element_by_xpath('//*[@id="phone_2"]').send_keys(str(homephone)[3:6])
            chrome_driver.find_element_by_xpath('//*[@id="phone3"]').send_keys(str(homephone)[6:10])
            chrome_driver.find_element_by_xpath('//*[@id="email"]').send_keys(submit['Auto']['email'])
            num_click = random.randint(1, 4)
            # sleep(2000)
            for i in range(num_click):
                chrome_driver.find_element_by_xpath('//*[@id="plus"]').click()
            chrome_driver.find_element_by_xpath('//*[@id="income-widget"]/label[4]').click()
            chrome_driver.find_element_by_xpath('//*[@id="healthForm"]/div[14]/button').click()
            sleep(20)
            flag = 1
    except Exception as e:
        print(str(e))
    # sleep(3000)
    try:
        chrome_driver.close()
        chrome_driver.quit()
    except Exception as e:
        print(str(e))
    return flag
def check_email(submit):
    """POST submit['Email_emu'] to an email-verification endpoint.

    Returns 0 on success ('GOOD_EMAIL' in the response), 1 when the server
    answered without 'GOOD_EMAIL', and -1 when no response was received.
    """
    print(submit['Email_emu'])
    data = {'email': submit['Email_emu']}
    data = parse.urlencode(data).encode('gbk')
    # NOTE(review): 'url' is not defined anywhere in this chunk -- it must
    # be a module-level constant declared elsewhere; confirm before use.
    req = request.Request(url, data=data)
    page = ''
    # Up to five attempts, 10-second timeout each; stop on first response.
    for i in range(5):
        try:
            page = request.urlopen(req,timeout=10.0).read()
        except:
            continue
        if str(page) != '':
            break
    print(page)
    if 'GOOD_EMAIL' not in str(page):
        if page == '':
            return -1  # network failure: no response at all
        else:
            return 1  # server answered but did not report GOOD_EMAIL
    else:
        print(submit['Email_emu'],'is GOOD_EMAIL')
        return 0  # success
def check_name(submit):
    """POST submit['name'] to a username-availability endpoint.

    Returns 0 when the response contains 'OK', otherwise 1.
    """
    data = {'username':submit['name']}
    data = parse.urlencode(data).encode('gbk')
    # NOTE(review): 'url2' is not defined in this chunk -- presumably a
    # module-level constant declared elsewhere; confirm before use.
    req = request.Request(url2, data=data)
    page = ''
    # Up to five attempts, 10-second timeout each; stop on first response.
    for i in range(5):
        try:
            page = request.urlopen(req,timeout=10.0).read()
        except Exception as msg:
            print(msg)
            continue
        if str(page) != '':
            break
    print(page)
    if 'OK' not in str(page):
        return 1  # fail
    else:
        return 0  # success
def email_confirm(submit):
    """Poll the mailbox for the Cam4 verification mail and return the
    confirmation link found in it, or '' if none arrives within 10 polls."""
    attempts = 0
    while attempts < 10:
        body = imap.email_getlink(submit,'Subject: Verify at Cam4 to Continue')
        print(len(body))
        if 'cam4' in body:
            # Locate the confirmation URL just after the "get verified:" text.
            anchor = body.find('get verified:')
            start = body.find('http://www.cam4.com/signup/confirm?uname=', anchor)
            end = body.find('\n', start)
            return body[start:end]
        print('Target Email Not Found !')
        sleep(10)
        attempts += 1
    return ''
if __name__=='__main__':
    # Ad-hoc manual test data for a single run.
    submit={}
    submit['ua'] = ''
    submit['name'] = 'dfdss2343'
    submit['pwd'] = 'cvbsasdsddasz'
    submit['Email_emu'] = 'BettinaNavarroGx@aol.com'
    submit['Email_emu_pwd'] = 'G9x1C1zf'
    # LlwthdKlhcvr@hotmail.com----glL9jPND4nDp
    # site='http://www.baidu.com'
    # NOTE(review): web_submit() takes (submit, chrome_driver[, debug]);
    # this call passes only 'submit' and will raise TypeError -- a driver
    # instance (presumably from the Chrome_driver module) must be supplied.
    web_submit(submit)
    # BettinaNavarroGx@aol.com G9x1C1zf
    # site = email_confirm(submit)
    # print(site)
    # test()
|
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
#import hdbscan as hdb
from sklearn.cluster import KMeans, Birch
from wordcloud import WordCloud
from big_picture.clusters import Cluster
from big_picture.pre_processor import pre_process
from big_picture.vectorizers import tf_idf, embedding_strings
class Label():
    """
    Class that creates an object to be labelled and organized by topic.
    Parameters
    ----------
    df : df
        DataFrame containing the features to be analyzed:
        - Title;
        - Authors;
        - Publisher;
        - Date;
        - Link;
        - Content.
    label: string
        Label corresponding to the topic of the class.
    vec_name: string (default: embedding_string)
        Name of vectorizer to be used for vectorizing. Options:
        embedding_string
        tf_idf
    model_name: string (default: kmeans)
        Name of model to be used for clustering
    """
    def __init__(self, df, label, vec_name='embedding_strings', model_name='kmeans', **kwargs):
        self.label = label
        # Vectorize the pre-processed text with the requested vectorizer.
        if vec_name == 'tf_idf':
            vectors, self.vectorizer = tf_idf(df.pre_processed_text)
        elif vec_name == 'embedding_strings':
            vectors, self.vectorizer = embedding_strings(df.pre_processed_text, return_model=True)
        else:
            # NOTE(review): an unknown vec_name leaves 'vectors' undefined and
            # the clustering below will raise NameError -- confirm intent.
            pass
        self.model = None
        self.sizes = None
        # Heuristic: roughly one cluster per 30 articles (at least one).
        if model_name == 'kmeans':
            self.clusters= self.kmeans(df,
                                       'pre_processed_text',
                                       vectors,
                                       clusters=1+len(df)//30,
                                       **kwargs
                                       )
        elif model_name == 'birch':
            self.clusters= self.birch(df,
                                      'pre_processed_text',
                                      vectors,
                                      clusters=1+len(df)//30,
                                      **kwargs
                                      )
        else:
            print("No model was found, this may cause problems in the future")
    def predict(self, vector):
        """
        Function that predicts the closest cluster number of a given vectorized sample
        """
        if not self.model:
            raise Exception('No model found')
        return self.model.predict(vector)[0]
    def extract_top_n_words_per_topic(self, tf_idf, count, docs_per_topic, n=20):
        """
        Extracts the top words of a topics.
        Takes a dataframe with aggregated articles grouped by topic
        and the information obtained through tf-idf vectorization of it.
        """
        words = count.get_feature_names()
        labels = list(docs_per_topic.topic)
        tf_idf_transposed = tf_idf.T
        # argsort is ascending, so the last n columns hold the largest
        # scores; [::-1] puts each topic's words in descending order.
        indices = tf_idf_transposed.argsort()[:, -n:]
        top_n_words = [[words[j] for j in indices[i]][::-1] for i, label in enumerate(labels)]
        return top_n_words
    def c_tf_idf(self, documents, m, ngram_range=(1, 1)):
        """
        Vectorize a dataframe of documents that have been aggregated by cluster.
        Parameter 'm' is the total number of articles in the data set
        """
        count = CountVectorizer(ngram_range=ngram_range, stop_words="english").fit(documents)
        t = count.transform(documents).toarray()  # term counts per topic-document
        w = t.sum(axis=1)  # total words per topic-document
        tf = np.divide(t.T, w)  # class-level term frequency
        sum_t = t.sum(axis=0)
        idf = np.log(np.divide(m, sum_t)).reshape(-1, 1)  # inverse document frequency
        tf_idf = np.multiply(tf, idf)
        return tf_idf, count
    def output_format(self, X, column, **kwargs):
        """
        Returns a list of cluster objects with the dataframe and topic
        Optionally the size of each cluster
        """
        # One concatenated "document" per topic for class-based TF-IDF.
        docs_per_topic = X.groupby(['topic'], as_index = False).agg({column: ' '.join})
        # print(X.pre_processed_text.iloc[0])
        # print(X.groupby(['topic'], as_index = False).count())
        tf_idf, count = self.c_tf_idf(docs_per_topic[column].values, m=len(X))
        top_n_words = self.extract_top_n_words_per_topic(tf_idf, count, docs_per_topic, n=20)
        # Cluster sizes, largest first.
        self.sizes = (X.groupby(['topic'])
                      .content
                      .count()
                      .reset_index()
                      .rename({"topic": "topic", "content": "Size"}, axis='columns')
                      .sort_values("Size", ascending=False))
        clusters = []
        # NOTE(review): this assumes X['topic'].unique() enumerates topics in
        # the same order as docs_per_topic's rows (used via .iloc[i] below) --
        # confirm.
        for topic in X['topic'].unique():
            clusters.append((X[X.topic == topic]))
        output = []
        for i, cluster in enumerate(clusters):
            wordcloud = WordCloud(width = 800, height = 800,
                                  background_color ='white',
                                  min_font_size = 10).generate(docs_per_topic[column].iloc[i])
            output.append(
                Cluster(
                    cluster,
                    top_n_words[i],
                    wordcloud,
                    **kwargs
                )
            )
        return output
    def kmeans(self, X, column, vectors, clusters=8, **kwargs):
        """
        Kmean model that outputs a list of cluster objects with the dataframe and topic
        Parameters
        ----------
        X : df
            Data Frame of articles
        column : string
            the preproccessed column name
        vectors : string
            vectorized data of the preproccessed column
        clusters : int (default: 8)
            intended number of clusters
        """
        self.model = KMeans(n_clusters=clusters).fit(vectors)
        X['topic'] = self.model.labels_
        return self.output_format(X, column,**kwargs)
    def birch(self, X, column, vectors, clusters, threshold=0.5, **kwargs):
        """
        Birch model that outputs a list of cluster objects with the dataframe and topic
        Parameters
        ----------
        X : df
            Data Frame of articles
        column : string
            the preproccessed column name
        vectors : string
            vectorized data of the preproccessed column
        clusters : int
            intended number of clusters
        threshold : int (default: 0.5)
            The radius of the subcluster obtained by merging a new sample and the closest
            subcluster should be lesser than the threshold. Otherwise a new subcluster is started.
            Setting this value to be very low promotes splitting and vice-versa.
        """
        self.model = Birch(threshold=threshold, n_clusters=clusters).fit(vectors)
        X['topic'] = self.model.labels_
        return self.output_format(X, column,**kwargs)
|
# -*- coding:utf-8 -*-
# Created by LuoJie at 11/16/19
import os
import pathlib
# Project root directory (two levels up from this file)
root = pathlib.Path(os.path.abspath(__file__)).parent.parent
# Training data path
train_data_path = os.path.join(root, 'data', 'AutoMaster_TrainSet.csv')
# Test data path
test_data_path = os.path.join(root, 'data', 'AutoMaster_TestSet.csv')
# Stop-word list path (Harbin Institute of Technology stop-word table)
stop_word_path = os.path.join(root, 'data', 'stopwords/哈工大停用词表.txt')
# Custom tokenizer (word segmentation) dictionary
user_dict = os.path.join(root, 'data', 'user_dict.txt')
# Pre-processed (segmented) training data
train_seg_path = os.path.join(root, 'data', 'train_seg_data.csv')
# Pre-processed (segmented) test data
test_seg_path = os.path.join(root, 'data', 'test_seg_data.csv')
# Merged train + test segmented data
merger_seg_path = os.path.join(root, 'data', 'merged_train_test_seg_data.csv')
|
import math
def area_of_a_triangle(a, b, c):
    """Return the area of a triangle with side lengths a, b, c (Heron's formula).

    Parameters
    ----------
    a, b, c : float
        Positive side lengths satisfying the triangle inequality.

    Returns
    -------
    float
        The triangle's area (0.0 for a degenerate, collinear triangle).

    Raises
    ------
    ValueError
        If a side is non-positive or the sides violate the triangle
        inequality.  (Previously such inputs surfaced as an opaque
        'math domain error' from math.sqrt.)
    """
    if a <= 0 or b <= 0 or c <= 0:
        raise ValueError("side lengths must be positive")
    semiperimeter = (a + b + c) / 2
    radicand = semiperimeter * (semiperimeter - a) * (semiperimeter - b) * (semiperimeter - c)
    if radicand < 0:
        raise ValueError("sides %r, %r, %r violate the triangle inequality" % (a, b, c))
    return math.sqrt(radicand)
print(area_of_a_triangle(3,4,5)) |
from Tkinter import *
import serial
import thread
# Direction-name -> command-code mapping sent to the Arduino:
# {forward:0,backward:1,left:2,right:3,submerge:4,emerge:5}
arduino = serial.Serial('/dev/ttyACM0',9600)  # Arduino over USB serial, 9600 baud
input_count = 0
pwms_dictionary = {"forward":0,"backward":1,"left":2,"right":3,"submerge":4,"emerge":5}
top = Tk()  # main application window
top.configure(bg="#353839")
top.wm_title("CSLAUV Testing Utility")
def send_command(index, direction, pwm):
    """Write one 'index:direction_code:pwm' command line to the Arduino."""
    packet = "{0}:{1}:{2}".format(index, pwms_dictionary[direction], pwm)
    arduino.write(packet)
def create_frame(thrusterName, r, c):
    """Build one thruster control row: name label, direction menu, PWM entry,
    and an UPDATE button that sends the selection over serial."""
    thrusterFrame = Frame(top,bg="#353839")
    label = Label(thrusterFrame, text = thrusterName, justify="center",fg = "#ffd700",bg="#353839", font=("Courier",14))
    label.grid(row=r, column=c, padx = 30, pady = 10)
    var = StringVar(thrusterFrame)
    pwm_option = OptionMenu(thrusterFrame,var,"forward", "backward", "left", "right", "submerge", "emerge")
    pwm_option.config(width =20)
    pwm_option.grid(row = r, column = c+1)
    var.set("forward") # initial value
    entry = Entry(thrusterFrame,bd=5, justify=CENTER)
    entry.grid(row=r,column= c+2)
    entry.insert(END, 1500)  # default PWM value; presumably the neutral pulse -- TODO confirm
    # r - 3 maps the grid row to the thruster index (rows 4-11 -> 1-8).
    button = Button(master=thrusterFrame, text='UPDATE', command= lambda: send_command(r - 3,var.get(),entry.get()))
    button.grid(row=r,column= c+3)
    thrusterFrame.grid(row = r, column=c)
# Console area: black frame holding a scrollable canvas of serial output lines.
console = Frame(top,bg="#000000")
canvas=Canvas(console,bg="#000000", width = 685,height = 230)
def create_console():
    """Start the serial-console reader (update_console) on a background thread."""
    try:
        thread.start_new_thread(update_console,())
    except:
        print "Error: unable to start thread"
def myfunction(event):
    # Keep the canvas scroll region in sync with the frame's current contents.
    canvas.configure(scrollregion=canvas.bbox("all"),width=685,height=230)
def update_console():
    """Read lines from the Arduino forever and append them to the console.

    Runs on a worker thread; NOTE(review): it creates Tk widgets from that
    thread, which Tkinter does not guarantee to be safe -- confirm.
    """
    #console = Frame(top,bg="#000000")
    console.grid(row=12,column=0,pady=25)
    console.configure(width = 725,height = 200)
    #canvas=Canvas(console,bg="#000000" )
    frame=Frame(canvas, bg="#000000")
    myscrollbar=Scrollbar(console,orient=VERTICAL,command=canvas.yview)
    canvas.configure(yscrollcommand=myscrollbar.set)
    myscrollbar.pack(side="right",fill="y")
    canvas.pack(side="left")
    canvas.create_window((0,0),window=frame,anchor='nw')
    frame.bind("<Configure>",myfunction)
    count = 0
    while True:
        # Keep the view pinned to the newest line.
        canvas.yview_moveto(1.0)
        data = arduino.readline()  # blocking read of one serial line
        if data:
            label = Label(frame, text = data.strip(),justify="left",fg = "#ffd700",bg="#000000", font=("Courier",10),wraplength=700)
            label.grid(row=count, column=0, padx = 0, pady = 0,sticky="w")
            canvas.yview_moveto(1.0)
            count += 1
# Build the UI: title row, eight thruster rows (grid rows 4-11), console,
# and a fixed 700x700 window; then enter the Tk main loop.
label = Label(top, text = "CSLAUV TESTING UTILITY", justify="center",fg = "#F8F8FF",bg="#353839", font=("Courier",20,"bold"))
label.grid(row=0, column=0, pady = 20)
create_frame("Thruster 1",4,0)
create_frame("Thruster 2",5,0)
create_frame("Thruster 3",6,0)
create_frame("Thruster 4",7,0)
create_frame("Thruster 5",8,0)
create_frame("Thruster 6",9,0)
create_frame("Thruster 7",10,0)
create_frame("Thruster 8",11,0)
create_console()
top.resizable(width=False, height=False)
top.minsize(width=700, height=700)
top.maxsize(width=700, height=700)
top.mainloop()
|
# -*- coding: utf-8 -*-
import cookielib
import datetime
import json
import os
import sys
import urllib
import urllib2
import urlparse
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
#######################################
# global constants
url_base = 'http://api.rtvslo.si/ava/'  # RTV Slovenija AVA API endpoint
client_id = '82013fb3a531d5414f478747c1aca622'
# Sentinel menu action used to wipe the stored search history.
delete_action = 'deleteall893745927368199189474t52910373h2i2u2j2788927628018736tghs8291282'
search_history_file = xbmc.translatePath(xbmcaddon.Addon().getAddonInfo('profile')).decode('utf-8')
# Ensure the addon profile directory exists (bare except tolerates
# "already exists"; it also hides other OS errors -- NOTE(review)).
try:
    os.makedirs(search_history_file)
except:
    pass
search_history_file = os.path.join(search_history_file, 'history.json')
# classes
#######################################
# functions
def do_MainMenu():
    """Render the addon's root menu: search, show/clip archives, A-Z listing.

    Relies on module-level globals (base, contentType, handle) defined
    elsewhere in this file.
    """
    # login
    api = login()
    # Search ("Iskanje")
    li = xbmcgui.ListItem('Iskanje')
    url = build_url(base, {'content_type': contentType, 'menu': 'SearchHistory', 'api': api})
    xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
    # Shows archive ("Arhiv Oddaj")
    li = xbmcgui.ListItem('Arhiv Oddaj')
    url = build_url(base, {'content_type': contentType, 'menu': 'ShowsArchive', 'api': api})
    xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
    # Clips archive ("Arhiv Prispevkov")
    li = xbmcgui.ListItem('Arhiv Prispevkov')
    url = build_url(base, {'content_type': contentType, 'menu': 'ClipsArchive', 'api': api})
    xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
    # Alphabetical archive ("Arhiv Po Abecedi")
    li = xbmcgui.ListItem('Arhiv Po Abecedi')
    url = build_url(base, {'content_type': contentType, 'menu': 'ListLetters', 'api': api})
    xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
def do_ShowsArchive():
    """List the shows archive: genre/sort submenus plus the day's shows.

    Uses module-level globals (base, contentType, handle, sort, showTypeId,
    list_date, api) defined elsewhere in this file.
    """
    # Submenus for genres ("Zvrsti") and sorting ("Sortiranje")
    li = xbmcgui.ListItem('Zvrsti')
    url = build_url(base, {'content_type': contentType, 'menu': 'ListShowGenres', 'sort': sort, 'api': api})
    xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
    li = xbmcgui.ListItem('Sortiranje')
    url = build_url(base,
                    {'content_type': contentType, 'menu': 'ListShowSortorders', 'showTypeId': showTypeId, 'api': api})
    xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
    # url parameters for the AVA getSearch endpoint ('clip': 'show' selects
    # whole shows; from/to limit results to the selected date)
    url_query = {}
    url_query['client_id'] = client_id
    url_query['q'] = ''
    url_query['showTypeId'] = str(showTypeId)
    url_query['sort'] = str(sort)
    url_query['order'] = 'desc'
    url_query['pageSize'] = '999'
    url_query['source'] = ''
    url_query['hearingAid'] = '0'
    url_query['clip'] = 'show'
    url_query['from'] = str(list_date)
    url_query['to'] = str(list_date)
    url_query['WPId'] = ''
    url_query['zkp'] = '0'
    url_query['callback'] = 'jQuery11130980077945755083_1462458118383'
    url_query['_'] = '1462458118384'
    url = build_url(url_base + 'getSearch', url_query)
    getItemList(url, {'listType': 'streamlist', 'paging_style': 'date'})
def do_ClipsArchive():
    """List the clips archive: genre/sort submenus plus the day's clips.

    Mirrors do_ShowsArchive but queries 'clip': 'clip' instead of 'show'.
    Uses module-level globals (base, contentType, handle, sort, showTypeId,
    list_date, api) defined elsewhere in this file.
    """
    # Submenus for genres ("Zvrsti") and sorting ("Sortiranje")
    li = xbmcgui.ListItem('Zvrsti')
    url = build_url(base, {'content_type': contentType, 'menu': 'ListClipGenres', 'sort': sort, 'api': api})
    xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
    li = xbmcgui.ListItem('Sortiranje')
    url = build_url(base,
                    {'content_type': contentType, 'menu': 'ListClipSortorders', 'showTypeId': showTypeId, 'api': api})
    xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
    # url parameters for the AVA getSearch endpoint
    url_query = {}
    url_query['client_id'] = client_id
    url_query['q'] = ''
    url_query['showTypeId'] = str(showTypeId)
    url_query['sort'] = str(sort)
    url_query['order'] = 'desc'
    url_query['pageSize'] = '999'
    url_query['source'] = ''
    url_query['hearingAid'] = '0'
    url_query['clip'] = 'clip'
    url_query['from'] = str(list_date)
    url_query['to'] = str(list_date)
    url_query['WPId'] = ''
    url_query['zkp'] = '0'
    url_query['callback'] = 'jQuery111307342043845078507_1462458568679'
    url_query['_'] = '1462458568680'
    url = build_url(url_base + 'getSearch', url_query)
    # download response from rtvslo api
    getItemList(url, {'listType': 'streamlist', 'paging_style': 'date'})
def do_ListGenres(nextmenu):
    """Add one directory entry per programme genre.

    nextmenu: menu id the entries link back to ('ShowsArchive' or
    'ClipsArchive'). The showTypeId values are RTVSLO API genre ids.
    Replaces eight copy-pasted stanzas with one data-driven loop
    (same labels, same ids, same order).
    """
    # (label, RTVSLO showTypeId) in the original display order
    genres = [
        ('Informativni', 34),
        ('Športni', 35),
        ('Izobraževalni', 33),
        ('Kulturno Umetniški', 30),
        ('Razvedrilni', 36),
        ('Verski', 32),
        ('Otroški', 31),
        ('Mladinski', 15890838),
    ]
    for label, type_id in genres:
        li = xbmcgui.ListItem(label)
        url = build_url(base, {'content_type': contentType, 'menu': nextmenu,
                               'showTypeId': type_id, 'sort': sort, 'api': api})
        xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
def do_ListSortorders(nextmenu):
    """Add the three sort-order entries (date / title / popularity).

    nextmenu: menu id the entries link back to.
    Replaces three copy-pasted stanzas with one loop (same labels,
    same sort keys, same order).
    """
    orders = [
        ('Po Datumu', 'date'),
        ('Po Naslovu', 'title'),
        ('Po Popularnosti', 'popularity'),
    ]
    for label, sort_key in orders:
        li = xbmcgui.ListItem(label)
        url = build_url(base, {'content_type': contentType, 'menu': nextmenu, 'sort': sort_key,
                               'showTypeId': showTypeId, 'api': api})
        xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
def do_Search(search_string, search_type):
    """Search the RTVSLO archive for clips or shows.

    search_string: query text; '' opens the on-screen keyboard.
    search_type: 0 = clips, 1 = shows; negative opens a selection dialog.
    Relies on module globals (client_id, showTypeId, sort, page, url_base)
    set in main.
    """
    if search_string == '':
        # no query supplied: prompt the user
        keyboard = xbmc.Keyboard('', 'Iskanje', False)
        keyboard.doModal()
        if not keyboard.isConfirmed() or not keyboard.getText():
            xbmcgui.Dialog().ok('RTV Slovenija', 'Iskanje je prekinjeno')
            return
        search_string = keyboard.getText()
    if search_type < 0:
        # ask whether to search clips (0) or shows (1); abort on cancel
        search_type = xbmcgui.Dialog().select('Izberi:', ['Iskanje Prispevkov', 'Iskanje Oddaj'])
        if search_type < 0:
            xbmcgui.Dialog().ok('RTV Slovenija', 'Iskanje je prekinjeno')
            return
    # all is set, let's do this
    # move/insert this query to the top of the search history
    delete_history_item(search_string, True)
    if type(search_string) == unicode:  # Python 2 text type
        search_string = search_string.encode('utf-8')
    search_string = search_string.replace(' ', '+')
    # url parameters
    url_query = {}
    url_query['client_id'] = client_id
    url_query['q'] = search_string
    url_query['showTypeId'] = str(showTypeId)
    url_query['sort'] = str(sort)
    url_query['order'] = 'desc'
    url_query['pageSize'] = '12'
    url_query['pageNumber'] = str(page)
    url_query['source'] = ''
    url_query['hearingAid'] = '0'
    if search_type == 0:
        url_query['clip'] = 'clip'
    else:
        url_query['clip'] = 'show'
    url_query['from'] = ''  # '2007-01-01'
    url_query['to'] = ''
    url_query['WPId'] = ''
    url_query['zkp'] = '0'
    url_query['callback'] = 'jQuery111307342043845078507_1462458568679'
    url_query['_'] = '1462458568680'
    url = build_url(url_base + 'getSearch', url_query)
    getItemList(url, {'listType': 'streamlist', 'paging_style': 'page', 'title_style': 'date',
                      'search_string': search_string, 'search_type': search_type})
def do_SearchHistory():
    """List a 'new search' entry followed by the saved search history.

    Each history entry gets context-menu actions to delete itself or the
    whole history (handled by the 'DeleteHistory' menu).
    """
    li = xbmcgui.ListItem('Novo Iskanje')
    li.addContextMenuItems([('Izbriši zgodovino', 'RunPlugin(%s)' % (build_url(base, {'content_type': contentType,
                                                                                      'menu': 'DeleteHistory',
                                                                                      'search_string': delete_action,
                                                                                      'api': api})))])
    url = build_url(base, {'content_type': contentType, 'menu': 'Search', 'sort': 'date', 'showTypeId': showTypeId,
                           'api': api})
    xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
    try:
        with open(search_history_file, "r") as s_file:
            s_file_data = json.load(s_file)
    except:
        # no (readable) history file yet -> nothing more to list
        return
    for search_entry in s_file_data.get('SearchHistory', []):
        # URL-safe form of the stored query
        search_string = search_entry.replace(' ', '+')
        search_string = search_string.encode('utf-8')
        li = xbmcgui.ListItem(search_entry)
        li.addContextMenuItems([('Izbriši iskanje', 'RunPlugin(%s)' % (build_url(base, {'content_type': contentType,
                                                                                        'menu': 'DeleteHistory',
                                                                                        'search_string': search_string,
                                                                                        'api': api}))),
                                ('Izbriši zgodovino', 'RunPlugin(%s)' % (build_url(base, {'content_type': contentType,
                                                                                          'menu': 'DeleteHistory',
                                                                                          'search_string': delete_action,
                                                                                          'api': api})))])
        url = build_url(base, {'content_type': contentType, 'menu': 'Search', 'sort': 'date', 'showTypeId': showTypeId,
                               'search_string': search_string, 'api': api})
        xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
def delete_history_item(search_string, also_insert):
    """Remove search_string from the history file; optionally re-insert it
    at the top (used by do_Search to keep most-recent-first order).

    search_string == delete_action with also_insert False wipes the file.
    """
    if not also_insert:
        if search_string == delete_action:
            # special marker: truncate the whole history file
            open(search_history_file, 'w').close()
            return
    if type(search_string) != unicode:  # Python 2: normalise to text
        search_string = search_string.decode('utf-8')
    try:
        with open(search_history_file, "r") as s_file:
            s_file_data = json.load(s_file)
    except:
        # missing/corrupt file -> start with an empty history
        s_file_data = {}
    search_list = s_file_data.get('SearchHistory', [])
    try:
        search_list.remove(search_string)
    except:
        pass
    if also_insert:
        search_list.insert(0, search_string)
    # keep at most 11 entries (NOTE(review): indentation was lost in this
    # copy; the truncation may originally have applied only on insert)
    search_list = search_list[0:11]
    s_file_data['SearchHistory'] = search_list
    with open(search_history_file, "w") as s_file:
        json.dump(s_file_data, s_file)
def do_ListShows():
    """List all shows whose title starts with the selected letter
    (module global 'letter', set from the ListLetters menu)."""
    query = {
        'client_id': client_id,
        'sort': 'title',
        'order': 'asc',
        'pageSize': '100',
        'hidden': '0',
        'start': letter,
        'callback': 'jQuery111306175395867148092_1462381908718',
        '_': '1462381908719',
    }
    url = build_url(url_base + 'getShowsSearch', query)
    # download response from rtvslo api
    getItemList(url, {'listType': 'showlist'})
def do_ListStreams():
    """List one page of recordings of the selected show
    (module globals 'show_id' and 'page')."""
    query = {
        'client_id': client_id,
        'sort': 'date',
        'order': 'desc',
        'pageSize': '12',
        'pageNumber': str(page),
        'from': '1991-01-01',
        'clip': 'show',
        'showId': show_id,
        'callback': 'jQuery11130007442688502199202_1462387460339',
        '_': '1462387460342',
    }
    url = build_url(url_base + 'getSearch', query)
    # download response from rtvslo api
    getItemList(url, {'listType': 'streamlist', 'paging_style': 'page', 'title_style': 'date'})
def getItemList(url, _args):
    """Fetch a JSONP response from the RTVSLO API and dispatch to a parser.

    _args['listType'] selects the parser ('showlist', 'streamlist' or
    'singlestream'); the remaining _args are forwarded to it.
    """
    rtvsloHtml = urllib2.urlopen(url)
    rtvsloData = rtvsloHtml.read()
    rtvsloHtml.close()
    # extract json from response
    # the API answers JSONP: callbackName({ ... }); -- strip the wrapper
    x = rtvsloData.find('({')
    y = rtvsloData.rfind('});')
    if x < 0 or y < 0:
        xbmcgui.Dialog().ok('RTV Slovenija', 'Strežnik ni posredoval seznama.')
        return
    # parse json to a list of streams
    rtvsloData = rtvsloData[x + 1:y + 1]
    if _args.get('listType') == 'showlist':
        parseShowsToShowList(rtvsloData)
    elif _args.get('listType') == 'streamlist':
        parseShowToStreamList(rtvsloData, _args)
    elif _args.get('listType') == 'singlestream':
        parseStreamToListEntry(rtvsloData, _args)
def build_url(base, query):
    """Return base followed by '?' and the urlencoded query dict."""
    encoded = urllib.urlencode(query)
    return '%s?%s' % (base, encoded)
def login():
    """Log in to rtvslo.si using credentials from the add-on settings.

    Returns the APISESSION cookie value on success, or '' (after showing
    a warning dialog) when the login failed.
    """
    # get settings
    username = xbmcplugin.getSetting(handle, 'username')
    password = xbmcplugin.getSetting(handle, 'password')
    # no Requests library dependency required...
    url = 'https://www.rtvslo.si/prijava'
    referurl = 'https://www.rtvslo.si'
    params = urllib.urlencode({'action': 'login', 'referer': referurl, 'user': username, 'pass': password})
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    cookies = cookielib.LWPCookieJar()
    handlers = [
        urllib2.HTTPHandler(),
        urllib2.HTTPSHandler(),
        urllib2.HTTPCookieProcessor(cookies)
    ]
    opener = urllib2.build_opener(*handlers)
    req = urllib2.Request(url, params, headers)
    response = opener.open(req)
    # collect cookies returned by the login POST
    cookies_dict = {}
    for cookie in cookies:
        cookies_dict[str(cookie.name)] = cookie.value
    a = ''
    try:
        a = str(cookies_dict['APISESSION'])
    except:
        # session cookie missing -> login failed; warn the user
        xbmcgui.Dialog().ok('RTV Slovenija',
                            'Prijava neuspešna!\n\nNekatere vsebine brez prijave niso dosegljive.\nVnos podatkov za prijavo je mogoč v nastavitvah.')
    return a
def parseShowsToShowList(js):
    """Parse a getShowsSearch JSON payload and add one folder per show
    matching the plugin's current media type."""
    showList = []  # NOTE(review): unused; kept as-is
    j = json.loads(js)
    j = j['response']['response']
    if len(j) == 0:
        return
    # list shows
    for show in j:
        # radio shows for audio mode, tv shows for video mode
        if (contentType == 'audio' and show['mediaType'] == 'radio') or (
                contentType == 'video' and show['mediaType'] == 'tv'):
            li = xbmcgui.ListItem(show['title'], iconImage=show['thumbnail']['show'])
            url = build_url(base, {'content_type': contentType, 'menu': 'ListStreams', 'page': 0, 'id': show['id'],
                                   'api': api})
            xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
def parseShowToStreamList(js, _args):
    """Parse a getSearch JSON payload: emit one entry per matching recording,
    then a paging entry (previous day or next page per _args['paging_style'])."""
    j = json.loads(js)
    j = j['response']['recordings']
    # find playlists and list streams
    for stream in j:
        if (contentType == 'audio' and stream['mediaType'] == 'audio') or (
                contentType == 'video' and stream['mediaType'] == 'video'):
            # url parameters
            url_query = {}
            url_query['client_id'] = client_id
            url_query['session_id'] = api
            url_query['callback'] = 'jQuery1113023734881856870338_1462389077542'
            url_query['_'] = '1462389077543'
            url = build_url(url_base + 'getRecording/' + stream['id'], url_query)
            # download response from rtvslo api (adds a single list entry)
            getItemList(url, {'listType': 'singlestream', 'title_style': _args.get('title_style', 'time')})
    if _args.get('paging_style', '') == 'date':
        # previous day
        ordinal_date = list_date.toordinal() - 1
        li = xbmcgui.ListItem('> ' + str(datetime.date.fromordinal(ordinal_date)) + ' >')
        url = build_url(base, {'content_type': contentType, 'menu': menu, 'ordinal_date': ordinal_date, 'sort': sort,
                               'showTypeId': showTypeId, 'api': api})
        xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
    else:
        if len(j) == 0:
            return
        # show next page marker
        page_no = int(page) + 1
        li = xbmcgui.ListItem('> ' + str(page_no) + ' >')
        url = build_url(base, {'content_type': contentType, 'menu': menu, 'page': page_no, 'sort': sort,
                               'search_string': _args.get('search_string', ''),
                               'search_type': _args.get('search_type', -1),
                               'showTypeId': showTypeId, 'api': api})
        xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
def parseStreamToListEntry(json_data, _args):
    """Parse a getRecording JSON payload and add one playable list entry.

    Skips recordings whose media type does not match the plugin's current
    contentType; _args['title_style'] selects a date- or time-prefixed title.
    """
    j = json.loads(json_data)
    j = j['response']
    if len(j) == 0:
        return
    typeOK = True
    try:
        if contentType == 'audio' and j['mediaType'] == 'video':
            typeOK = False
        if contentType == 'video' and j['mediaType'] == 'audio':
            typeOK = False
    except:
        pass
    if typeOK:
        # BUG FIX: stream_url must be pre-initialised -- when both extraction
        # attempts below failed it was unbound and 'if stream_url:' raised
        # NameError instead of simply skipping the entry.
        stream_url = ''
        # newer video streams usually have this format
        try:
            stream_url = j['addaptiveMedia']['hls']
        except:
            # audio streams and some older video streams have this format
            try:
                stream_url = j['mediaFiles'][0]['streamers']['http']
                if stream_url.find('ava_archive02') > 0:
                    stream_url = stream_url.replace("ava_archive02", "podcast/ava_archive02")
                stream_url = stream_url + '/' + j['mediaFiles'][0]['filename']
            except:
                pass
        # list stream
        if stream_url:
            stream_aired = j.get('broadcastDate', '')
            if stream_aired != '':
                # broadcastDate looks like 'YYYY-MM-DD HH:MM...'; take HH:MM
                stream_time = stream_aired[11:16]
            else:
                stream_time = ''
            stream_genre = ''
            try:
                for g in j['broadcast']['genre']:
                    stream_genre = g  # keep only the last listed genre
            except:
                pass
            if _args.get('title_style') == 'date':
                list_title = j.get('date') + ' - ' + j.get('title', '')
            else:
                list_title = stream_time + ' - ' + j.get('title', '')
            list_item = xbmcgui.ListItem(list_title, j.get('showName', ''))
            list_item.setArt({'thumb': j['images'].get('orig', ''),
                              'poster': j['images'].get('orig', ''),
                              'banner': j['images'].get('orig', ''),
                              'fanart': j['images'].get('orig', ''),
                              'clearart': j['images'].get('fp1', ''),
                              'clearlogo': j['images'].get('fp2', ''),
                              'landscape': j['images'].get('wide1', ''),
                              'icon': j['images'].get('fp3', '')})
            if contentType == 'audio':
                list_item.setInfo('music', {'duration': j.get('duration', '0'),
                                            'genre': stream_genre,
                                            'title': j.get('title', ''),
                                            'playcount': j.get('views', '')})
            elif contentType == 'video':
                list_item.setInfo('video', {'duration': j.get('duration', '0'),
                                            'genre': stream_genre,
                                            'title': j.get('title', ''),
                                            'playcount': j.get('views', ''),
                                            'aired': stream_aired,
                                            'plot': j.get('description', ''),
                                            'plotoutline': j.get('showDescription', ''),
                                            'tvshowtitle': j.get('showName', '')})
            xbmcplugin.addDirectoryItem(handle=handle, url=stream_url, listitem=list_item)
#######################################
# main
if __name__ == "__main__":
    try:
        # arguments
        Argv = sys.argv
        # get add-on base url
        base = str(Argv[0])
        # get add-on handle
        handle = int(Argv[1])
        # in some cases kodi returns empty sys.argv[2]
        if Argv[2] == '':
            selection = xbmcgui.Dialog().select(
                'Kodi ni posredoval informacije o vrsti vsebine.\n\nIzberi vrsto vsebine:', ['TV', 'Radio'])
            if selection == 0:
                Argv[2] = '?content_type=video'
            else:
                Argv[2] = '?content_type=audio'
        # get add-on args
        args = urlparse.parse_qs(Argv[2][1:])
        # get content type
        contentType = str(args.get('content_type')[0])
        if contentType == 'audio':
            xbmcplugin.setContent(handle, 'songs')
        elif contentType == 'video':
            xbmcplugin.setContent(handle, 'videos')
        # get menu and other parameters (each with a sensible default)
        api = args.get('api', [''])[0]
        menu = args.get('menu', ['MainMenu'])[0]
        letter = args.get('letter', ['A'])[0]
        show_id = args.get('id', [''])[0]
        page = args.get('page', ['0'])[0]
        showTypeId = args.get('showTypeId', ['0'])[0]
        sort = args.get('sort', [''])[0]
        srch_string = args.get('search_string', [''])[0]
        srch_type = args.get('search_type', [-1])[0]
        dateArg = args.get('ordinal_date')
        if dateArg:
            list_date = datetime.date.fromordinal(int(dateArg[0]))
        else:
            list_date = datetime.date.today()
        # MENU SYSTEM -- dispatch on the 'menu' url parameter
        if menu == 'MainMenu':
            do_MainMenu()
        elif menu == 'ShowsArchive':
            do_ShowsArchive()
        elif menu == 'ListShowGenres':
            do_ListGenres('ShowsArchive')
        elif menu == 'ListShowSortorders':
            do_ListSortorders('ShowsArchive')
        elif menu == 'ClipsArchive':
            do_ClipsArchive()
        elif menu == 'ListClipGenres':
            do_ListGenres('ClipsArchive')
        elif menu == 'ListClipSortorders':
            do_ListSortorders('ClipsArchive')
        elif menu == 'Search':
            do_Search(srch_string, int(srch_type))
        elif menu == 'SearchHistory':
            do_SearchHistory()
        elif menu == 'DeleteHistory':
            delete_history_item(srch_string, False)
            xbmc.executebuiltin('Container.Refresh')
        elif menu == 'ListLetters':
            # alphabet browser (Slovenian alphabet plus Q/W/X-less extras, '0' for digits)
            oddaje = ['A', 'B', 'C', 'Č', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
                      'S', 'Š', 'T', 'U', 'V', 'W', 'Z', 'Ž', '0']
            for o in oddaje:
                li = xbmcgui.ListItem(o)
                url = build_url(base, {'content_type': contentType, 'menu': 'ListShows', 'letter': o, 'api': api})
                xbmcplugin.addDirectoryItem(handle=handle, url=url, listitem=li, isFolder=True)
        elif menu == 'ListShows':
            do_ListShows()
        elif menu == 'ListStreams':
            do_ListStreams()
        else:
            xbmcgui.Dialog().ok('RTV Slovenija', 'Neznan meni: ' + menu)  # this never happens
        # write contents
        xbmcplugin.endOfDirectory(handle)
    except Exception as e:
        # NOTE(review): e.message is Python-2-only; would fail under Python 3
        xbmcgui.Dialog().ok('RTV Slovenija', 'Prišlo je do napake:\n' + e.message)
|
import web
import db
import json
urls = (
"/trains", "trains",
"/trains/(\d+)", "train"
)
app = web.application(urls, globals())
class trains:
    """Collection resource: GET /trains returns all trains as a JSON array."""
    def GET(self):
        trains = db.get_trains().list()
        return json.dumps(trains)
class train:
    """Single resource: GET /trains/<id> returns one train as JSON, 404 otherwise."""
    def GET(self, id):
        t = db.get_train(id)
        if not t:
            # falsy lookup result -> HTTP 404 with a JSON error body
            raise web.NotFound('{"error": "Not Found"}')
        return json.dumps(t)
# Start the built-in web.py development server when run directly.
if __name__ == "__main__":
    app.run()
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
"""
Create argv[1] iff it doesn't already exist.
"""
outfile = sys.argv[1]
if os.path.exists(outfile):
sys.exit()
open(outfile, "wb").close()
|
# Demonstration of Python's augmented assignment operators.
x = 3
x += 2  # addition: 3 + 2 -> 5
print(x)
y = 20
y -= 3  # subtraction: 20 - 3 -> 17
print(y)
z = 30
z %= 10  # modulo: 30 % 10 -> 0
print(z)
a = 2
a **= 3  # exponentiation: 2 ** 3 -> 8
print(a)
b = 40
b //= 4  # floor division: 40 // 4 -> 10
print(b)
# import the necessary packages
from __future__ import print_function
from imutils.object_detection import non_max_suppression
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2
# import the necessary packages
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
from fbchat import Client
from fbchat.models import *
# SECURITY NOTE(review): credentials are hard-coded in source -- move them to
# environment variables or a config file outside version control.
client = Client("lukas.grasse@uleth.ca", "parkingbot123")
thread_id = '527926877'

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
                help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=32,
                help="max buffer size")
args = vars(ap.parse_args())

# initialize the list of tracked points, the frame counter,
# and the coordinate deltas
pts = deque(maxlen=args["buffer"])
counter = 0
last = 0  # frame index of the last notification (rate limiting)
(dX, dY) = (0, 0)
direction = ""

# if a video path was not supplied, grab the reference to the webcam,
# otherwise grab a reference to the video file
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args["video"])

# PERFORMANCE FIX: initialize the HOG descriptor/person detector ONCE --
# it was re-created on every frame inside the loop.
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

# keep looping: detection
while True:
    # grab the current frame
    (grabbed, frame) = camera.read()
    # if we are viewing a video and we did not grab a frame,
    # then we have reached the end of the video
    if args.get("video") and not grabbed:
        break
    frame = imutils.resize(frame, width=400)
    # detect people in the image
    (rects, weights) = hog.detectMultiScale(frame, winStride=(4, 4),
                                            padding=(8, 8), scale=1.05)
    # apply non-maxima suppression to the bounding boxes using a
    # fairly large overlap threshold to try to maintain overlapping
    # boxes that are still people
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)
    # draw the final bounding boxes
    for (xA, yA, xB, yB) in pick:
        cv2.rectangle(frame, (xA, yA), (xB, yB), (0, 255, 0), 2)
    print(counter)
    # notify at most once every 30 frames
    if len(pick) != 0 and counter - last > 30:
        print("Person Detected!")
        last = counter
        name = "images/frame%d.jpg" % counter
        cv2.imwrite(name, frame)
        sent = client.sendLocalImage(name, message='parking alert', thread_id=thread_id, thread_type=ThreadType.USER)
        if sent:
            print("Message sent successfully!")
    # show the frame to our screen and increment the frame counter
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    counter += 1
    # if the 'q' key is pressed, stop the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
|
#!/usr/bin/python
import os, re, random
from bs4 import BeautifulSoup
# Split DUC 2007 source documents into train/test plain-text files.
# FIX: files are now closed deterministically (with-blocks); the implicit
# Python-2 floor division 'totalFiles*1/100' is written explicitly as '//'.
with open('parameters.txt', 'r') as param_file:
    totalFiles = int(param_file.readlines()[0].strip())
# hold out at most 5 documents (1% of the corpus) for testing
testCount = min(5, totalFiles // 100)
test = set(random.sample(list(xrange(totalFiles)), testCount))

filenumber = 0
for root, dirs, files in os.walk("./dataset/duc_2007"):
    for filename in files:
        # presumably selects the '*.S' sentence-segmented files -- note '.'
        # matches any character here, so e.g. 'XS' would match too
        match = re.search(".S", filename)
        if match:
            filenumber += 1
            filePath = root + "/" + filename
            with open(filePath, 'r') as source_file:
                datafile = source_file.read()
            soup = BeautifulSoup(datafile)
            if filenumber in test:
                out_path = 'dataset/test/' + str(filenumber) + '.txt'
            else:
                out_path = 'dataset/train/' + str(filenumber) + '.txt'
            with open(out_path, 'w') as textdata:
                # one cleaned, lower-cased sentence per output line
                for node in soup.findAll('text'):
                    for sentence in node.findAll('s'):
                        text = sentence.text.encode('ascii', 'ignore')
                        text = text.decode('utf-8')
                        text = text.replace('\n', '')
                        text = text.replace('.', '')
                        text = re.sub(r"[^\w\s]|_", "", text)
                        text = re.sub(' +', ' ', text)
                        text = text.lower()
                        textdata.write(text + '\n')
|
class Pessoa:
    """A simple person with a name, age, CPF number and awake/asleep state."""

    def __init__(self, nome, idade, cpf):
        self.nome = nome
        self.idade = idade
        self.cpf = cpf
        self.acordado = True  # awake by default

    def fazNiver(self):
        """Birthday: increment the age by one."""
        self.idade = self.idade + 1

    def dormir(self):
        """Put the person to sleep."""
        self.acordado = False
        # FIX: added the missing space before 'esta'
        print(self.nome + ' esta dormindo')

    def acordar(self):
        """Wake the person up.

        BUG FIX: this method was named 'acordado', colliding with the
        instance attribute of the same name set in __init__ -- the attribute
        shadowed the method, making it uncallable on instances.
        """
        self.acordado = True
        # FIX: added the missing space before 'Acordou'
        print(self.nome + ' Acordou')
# Quick demonstration of the Pessoa class.
pessoa1 = Pessoa('Juvenal', 22, 333)
print(pessoa1.idade)
pessoa1.fazNiver()
print(pessoa1.idade)  # age after the birthday
print(pessoa1.acordado)  # awake flag (instance attribute)
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os, os.path
import logging
import math
from DynamicSchedulerGeneric.Analyzer import DataCollector
from DynamicSchedulerGeneric.Analyzer import AnalyzeException
class EstRecord:
    """One service-time sample: (job start time, job id, observed duration).

    Accepts either three scalar arguments or a single 3-tuple/list.
    Records order by (startt, id); deltat is ignored for comparisons,
    matching the original __cmp__ semantics.
    """

    def __init__(self, *data):
        if len(data) == 3:
            self.startt = int(data[0])
            self.id = data[1]
            self.deltat = int(data[2])
        else:
            # single tuple/list argument (e.g. a line split from the sample file)
            pTuple = data[0]
            self.startt = int(pTuple[0])
            self.id = pTuple[1]
            self.deltat = int(pTuple[2])

    def __cmp__(self, item):
        # Python 2 ordering; kept for backward compatibility.
        if self.startt < item.startt:
            return -1
        if self.startt > item.startt:
            return 1
        if self.id < item.id:
            return -1
        if self.id > item.id:
            return 1
        return 0

    # BUG FIX: Python 3 ignores __cmp__, but BasicEstimator.estimate() sorts
    # these records and compares them with '<'. Provide rich comparisons with
    # the same (startt, id) ordering so the class works on both versions.
    def __lt__(self, item):
        return (self.startt, self.id) < (item.startt, item.id)

    def __eq__(self, item):
        return (self.startt, self.id) == (item.startt, item.id)

    def __hash__(self):
        return hash((self.startt, self.id))

    def __repr__(self):
        # exact on-disk format used by the per-queue sample files
        return '%d %s %d' % (self.startt, self.id, self.deltat)
class BasicEstimator(DataCollector):
    """Estimates per-queue response times from observed job service times.

    Maintains a rolling window of up to sampleNumber service-time samples
    per queue, persisted as one text file per queue under storeDir, and
    publishes ERT/WRT values via setERT/setWRT (inherited; see the formula
    comment above estimate()).
    """
    logger = logging.getLogger("BasicEstimator")
    DEFAULT_STORE_DIR = "/var/tmp/info-dynamic-scheduler-generic"
    DEFAULT_SAMPLE_NUM = 1000

    def __init__(self, config, mjTable):
        """Read [Main] sample_number / sample_dir from config (with defaults).

        Raises AnalyzeException when the sample directory is missing or
        not writable.
        """
        DataCollector.__init__(self, config, mjTable)
        if config.has_option('Main', 'sample_number'):
            self.sampleNumber = int(config.get('Main', 'sample_number'))
        else:
            self.sampleNumber = BasicEstimator.DEFAULT_SAMPLE_NUM
        if config.has_option('Main', 'sample_dir'):
            self.storeDir = config.get('Main', 'sample_dir')
        else:
            self.storeDir = BasicEstimator.DEFAULT_STORE_DIR
        if not os.path.isdir(self.storeDir) or not os.access(self.storeDir, os.W_OK):
            raise AnalyzeException("Cannot find or access directory %s" % self.storeDir)
        # per-queue state: service-time samples, queued count, running count
        self.buffer = dict()
        self.nqueued = dict()
        self.nrun = dict()

    def register(self, evndict):
        """Account one job event.

        Updates the queued/running counters for evndict['queue'] and, when
        the event carries a 'start' time, records a service-time sample of
        (self.now - start).  self.now is presumably set by the
        DataCollector base class -- TODO confirm.
        """
        qname = evndict['queue']
        if not qname in self.nqueued:
            self.nqueued[qname] = 0
        if not qname in self.nrun:
            self.nrun[qname] = 0
        if evndict['state'] == 'queued':
            self.nqueued[qname] += 1
        if evndict['state'] == 'running':
            self.nrun[qname] += 1
        if 'start' in evndict:
            if not qname in self.buffer:
                self.buffer[qname] = list()
            BasicEstimator.logger.debug('Updating service time for ' + str(evndict))
            record = EstRecord(evndict['start'], evndict['jobid'], self.now - evndict['start'])
            self.buffer[qname].append(record)

    # Given:
    # N number of queued jobs
    # R number of running jobs
    # K number of slots
    # Savg average service time
    # Smax max service time
    #
    # for each iteration we have:
    #
    #       / 0                        R < K
    # ERT = |
    #       \ ceil((N / K) + 1) * Savg  R = K
    #
    #       / 0                        R < K
    # WRT = |
    #       \ ceil((N / K) + 1) * Smax  R = K
    #
    def estimate(self):
        """Merge new samples with each queue's on-disk history, compute
        ERT/WRT per the formula above, and rewrite the sample files.
        Sample files for queues no longer present are removed."""
        for qname in self.buffer:
            if len(self.buffer[qname]) == 0:
                BasicEstimator.logger.debug('No events for %s' % qname)
                continue
            if self.nqueued[qname] > 0:
                # approximate K (slot count) with the current running count
                nslots = self.nrun[qname]
            else:
                #undef K
                nslots = -1
            self.buffer[qname].sort()
            buffIdx = 0
            tmpl = list()
            qFilename = os.path.join(self.storeDir, qname)
            qFile = None
            try:
                if os.path.exists(qFilename):
                    # merge on-disk samples with the new in-memory ones,
                    # keeping chronological order
                    qFile = open(qFilename)
                    for line in qFile:
                        tmpt = line.strip().split()
                        if len(tmpt) < 2:
                            continue
                        # '#nslot <K>' header persists the slot count
                        if tmpt[0] == "#nslot" and nslots < 0:
                            nslots = int(tmpt[1])
                            continue
                        if len(tmpt) < 3:
                            continue
                        tmprec = EstRecord(tmpt)
                        if buffIdx < len(self.buffer[qname]):
                            crsr = self.buffer[qname][buffIdx]
                            if tmprec < crsr:
                                tmpl.append(tmprec)
                                BasicEstimator.logger.debug('Registered %s' % str(tmprec))
                            else:
                                # NOTE(review): the file record is dropped on
                                # this branch -- confirm this is intended
                                tmpl.append(crsr)
                                buffIdx += 1
                                BasicEstimator.logger.debug('Registered %s' % str(crsr))
                        else:
                            tmpl.append(tmprec)
                            BasicEstimator.logger.debug('Registered %s' % str(tmprec))
                    qFile.close()
                    qFile = None
                # append any in-memory samples not consumed by the merge
                while buffIdx < len(self.buffer[qname]):
                    crsr = self.buffer[qname][buffIdx]
                    tmpl.append(crsr)
                    buffIdx += 1
                    BasicEstimator.logger.debug('Registered %s' % str(crsr))
                # keep only the newest sampleNumber samples
                if len(tmpl) > self.sampleNumber:
                    del tmpl[0:len(tmpl)-self.sampleNumber]
                # number of slot is still undefined
                # force R == K
                if nslots < 0:
                    nslots = self.nrun[qname]
                if self.nrun[qname] < nslots:
                    # free slots available: no expected wait
                    self.setERT(qname, 0)
                    self.setWRT(qname, 0)
                else:
                    tmpAvg = 0
                    tmpMax = -1
                    for tmprec in tmpl:
                        tmpAvg = tmpAvg + tmprec.deltat
                        tmpMax = max(tmpMax, tmprec.deltat)
                    tmpAvg = int(tmpAvg/len(tmpl))
                    tmpFact = int(math.ceil(float(self.nqueued[qname]) / float(nslots) + 1.0))
                    BasicEstimator.logger.debug("Factor: %d" % tmpFact)
                    BasicEstimator.logger.debug("Savg: %d" % tmpAvg)
                    BasicEstimator.logger.debug("Smax: %d" % tmpMax)
                    self.setERT(qname, tmpFact * tmpAvg)
                    self.setWRT(qname, tmpFact * tmpMax)
                # persist the merged window for the next run
                qFile = open(qFilename, 'w')
                qFile.write("#nslot %d\n" % nslots)
                for tmprec in tmpl:
                    qFile.write(str(tmprec) + "\n")
                qFile.close()
                qFile = None
            except:
                BasicEstimator.logger.error("Error reading %s" % qFilename, exc_info=True)
                if qFile:
                    try:
                        qFile.close()
                    except:
                        BasicEstimator.logger.error("Cannot close %s" % qFilename, exc_info=True)
        # drop sample files for queues that no longer exist
        oldQueues = os.listdir(self.storeDir)
        for tmpq in oldQueues:
            try:
                if not self.buffer.has_key(tmpq):  # Python 2 dict API
                    os.remove(os.path.join(self.storeDir, tmpq))
            except:
                BasicEstimator.logger.error("Cannot remove %s" % tmpq, exc_info=True)
def getEstimatorList():
    """Return the estimator classes exported by this module."""
    estimators = [BasicEstimator]
    return estimators
|
"""
Script to create log summary of all .csv log files in folder db_testing
"""
import os
import csv
from Constants import LOG_SUMMARY_PATH, DB_TESTING_FOLDER, DB_2A_FOLDER, LOG_SUMMARY_2A_PATH, \
LOG_TOTAL_DB_2A_PATH, LOG_TOTAL_PARAM_2A_PATH, SPLIT_COMPUTATION
def create_log_summary(log_summary_path, db_testing_folder):
    """Summarize every db_/param_ log pair in db_testing_folder into one CSV.

    For each 'db_*' log the FMR/FNMR rates are computed from the match rows;
    the matching 'param_*' log supplies timing averages and vault parameters.
    One summary row per experiment is appended to log_summary_path.
    """
    initiate_log_summary(log_summary_path)
    # loop through whole database (folder)
    all_paths = os.listdir(db_testing_folder)
    for log_name_original in all_paths:
        # BUG FIX: these flags were initialised once before the loop and never
        # reset, so after the first prefixed file every later file was also
        # treated as 'fvc_'/'1_vs_1_'. Reset them for each file.
        fvc_flag = False
        one_vs_one_flag = False
        if log_name_original.startswith('1_vs_1_'):
            log_name = log_name_original[len('1_vs_1_'):]
            one_vs_one_flag = True
        elif log_name_original.startswith('fvc_'):
            log_name = log_name_original[len('fvc_'):]
            fvc_flag = True
        else:
            log_name = log_name_original
        # only consider db_ logs, param_ logs information are written into same row
        if log_name.startswith('db_'):
            # BUG FIX: str.strip('db_')/strip('.csv')/strip('poly')/strip('minu')
            # strip CHARACTER SETS from both ends, not prefixes/suffixes; use
            # explicit slicing instead.
            log_info_original = log_name[len('db_'):]
            log_info = log_info_original[:-len('.csv')].split('_')
            assert len(log_info) == 4
            poly = log_info[0][len('poly'):]
            minu = log_info[1][len('minu'):]
            date = log_info[2]
            time = log_info[3]
            # Determine log file
            if fvc_flag:
                db_log_file = db_testing_folder + 'fvc_' + log_name
            elif one_vs_one_flag:
                db_log_file = db_testing_folder + '1_vs_1_' + log_name
            else:
                db_log_file = db_testing_folder + log_name
            # Read db file and calculate false positives (FMR) and false negatives (FNMR)
            with open(db_log_file) as db_log:
                # headerline
                next(db_log)
                # false positive and false negative totals
                false_positives = 0
                false_negatives = 0
                row_total_db = 0
                genuine_matches = 0
                genuine_non_matches = 0
                for row in csv.reader(db_log, delimiter=';'):
                    row_total_db += 1
                    # if fingers are the same: genuine match
                    if row[0] == row[2]:
                        genuine_matches += 1
                        # match of algorithm is falsch (false) -> should have been true
                        if row[4] == 'falsch' or row[4] == 'invalid probe':
                            false_negatives += 1
                    # fingers are not the same: no genuine match
                    else:
                        genuine_non_matches += 1
                        # match of algorithm is wahr (true) -> should have been false
                        if row[4] == 'wahr':
                            false_positives += 1
                fmr = round(false_positives / genuine_non_matches, 5)
                fnmr = round(false_negatives / genuine_matches, 5)
                print(genuine_matches)
                print(genuine_non_matches)
            if fvc_flag:
                param_log_file = db_testing_folder + 'fvc_' + 'param_' + log_info_original
            elif one_vs_one_flag:
                param_log_file = db_testing_folder + '1_vs_1_' + 'param_' + log_info_original
            else:
                param_log_file = db_testing_folder + 'param_' + log_info_original
            # Read param file and calculate averages
            with open(param_log_file) as param_log:
                # headerline
                next(param_log)
                # metrics
                encode_time_sum = 0
                decode_time_sum = 0
                interpol_time_sum = 0
                total_time_sum = 0
                row_total_param = 0
                first = True
                chaff_points_number = 0
                thresholds = ''
                subset_eval = ''
                for row in csv.reader(param_log, delimiter=';'):
                    encode_time_sum += float(row[9])
                    decode_time_sum += float(row[10])
                    interpol_time_sum += float(row[12])
                    total_time_sum += float(row[14])
                    row_total_param += 1
                    if first:
                        # experiment-constant parameters: read once from the first row
                        chaff_points_number = int(row[3])
                        thresholds = row[4]
                        if len(row) >= 22:
                            subset_eval = row[21]
                        first = False
                assert row_total_db == row_total_param or int(minu) > 46
                encode_time_avg = round(encode_time_sum / row_total_param, 2)
                decode_time_avg = round(decode_time_sum / row_total_param, 2)
                interpol_time_avg = round(interpol_time_sum / row_total_param, 2)
                total_time_avg = round(total_time_sum / row_total_param, 2)
            log_summary_one_experiment(poly, minu, date, time, chaff_points_number, thresholds, row_total_db,
                                       genuine_non_matches, genuine_matches, false_positives, false_negatives, fmr, fnmr,
                                       encode_time_avg, decode_time_avg, interpol_time_avg, total_time_avg,
                                       subset_eval, log_summary_path)
    print('Finished writing all log summaries')
def log_summary_2a():
    """Combine the split 2A db/param logs, then build their summary.

    Used when the computation was split across runs (SPLIT_COMPUTATION).
    """
    combine_log_files(LOG_TOTAL_DB_2A_PATH, DB_2A_FOLDER, '_db_')
    combine_log_files(LOG_TOTAL_PARAM_2A_PATH, DB_2A_FOLDER, '_param_')
    create_log_summary(LOG_SUMMARY_2A_PATH, DB_2A_FOLDER)
def combine_log_files(log_total_path, folder_path, keyword):
    """Concatenate every log in folder_path whose name contains keyword.

    The header line is taken from the first matching file only; data rows
    are appended in sorted filename order. The target file is truncated
    first so repeated runs don't duplicate rows.
    """
    # clear log
    open(log_total_path, 'w+').close()
    log_names = sorted(os.listdir(folder_path))
    wrote_header = False
    with open(log_total_path, 'a') as combined:
        for log_name in log_names:
            if keyword not in log_name:
                continue
            with open(folder_path + log_name) as log:
                print('Writing from {}'.format(log_name))
                header = next(log)
                if not wrote_header:
                    combined.write(header)
                    wrote_header = True
                for line in log:
                    combined.write(line)
    print('Finished combining {}'.format(log_total_path))
def initiate_log_summary(log_summary_path):
    """Clear the summary log file and write the semicolon-separated header."""
    fields = ('polynomial degree',
              '# minutia points',
              '# chaff points',
              'thresholds (x/y/theta/total/theta basis)',
              '# matches total',
              '# genuine non-matches',
              '# genuine matches',
              'false positives',
              'false negatives',
              'FMR',
              'FNMR',
              'avg encode time [s]',
              'avg decode time [s]',
              'avg interpolation time [s]',
              'avg total time [s]',
              'subset eval',
              'date',
              'time')
    # truncate, then append the single header line
    open(log_summary_path, 'w+').close()
    with open(log_summary_path, 'a') as log:
        log.write(';'.join(fields) + '\n')
def log_summary_one_experiment(poly, minu, date, time, chaff_points, thresholds, row_total, genuine_non_matches, genuine_matches,
                               false_positives, false_negatives, fmr, fnmr,
                               avg_encode, avg_decode, avg_interpol, avg_total, subset_eval, log_summary_path):
    """ Log one experiment summary to summary log file """
    # column order matches the header written by initiate_log_summary
    values = (poly, minu, chaff_points, thresholds, row_total, genuine_non_matches,
              genuine_matches, false_positives, false_negatives, fmr, fnmr,
              avg_encode, avg_decode, avg_interpol, avg_total, subset_eval, date, time)
    row = ';'.join('{}'.format(value) for value in values)
    with open(log_summary_path, 'a') as log_summary:
        log_summary.write(row + '\n')
    print('Finished writing summary for poly {} and minu {}'.format(poly, minu))
if __name__ == '__main__':
    # SPLIT_COMPUTATION: the 2A logs were produced in pieces and must be
    # combined before summarizing; otherwise summarize the testing folder.
    if SPLIT_COMPUTATION:
        log_summary_2a()
    else:
        create_log_summary(LOG_SUMMARY_PATH, DB_TESTING_FOLDER)
|
from django.urls import path
from .views import (
AuthorListAPIView,
AuthorDetailAPIView,
BookListAPIView,
BookDetailAPIView,
)
# REST-style routes for the author/book API; detail routes take the
# object's integer primary key.
urlpatterns = [
    path("author", AuthorListAPIView.as_view()),
    path("author/<int:pk>", AuthorDetailAPIView.as_view()),
    path("book", BookListAPIView.as_view()),
    path("book/<int:pk>", BookDetailAPIView.as_view()),
]
|
import numpy as np
from TwoLayerNet import TwoLayerNet
# 5.7.4 ---------------------------
from DataSet.mnist import load_mnist
# Load MNIST as normalised images with one-hot labels.
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
train_loss_list = []
# SGD hyper-parameters.
iters_num = 1000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
net = TwoLayerNet(input_size=784, hidden_size=50,output_size=10)
for i in range(iters_num):
    # Sample a random mini-batch (np.random.choice draws with replacement).
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]
    # Backprop gradient, then a vanilla SGD step on every parameter.
    grad = net.gradient(x_batch, t_batch)
    for key in ('W1', 'b1', 'W2', 'b2'):
        net.params[key] -= learning_rate * grad[key]
    loss = net.loss(x_batch, t_batch)
    train_loss_list.append(loss)
# Plot the per-iteration training loss curve.
import matplotlib.pyplot as plt
plt.plot(range(len(train_loss_list)), train_loss_list)
plt.show()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Tuple methods (tuples have only two):
#   count(x) - number of occurrences of x in the tuple
#   index(x) - index of the first occurrence of x in the tuple
# Tuples are immutable, but mutable objects nested inside them can change.
list1 = [2, 3]
print('这里有个一列表:\n\t', list1)
t = (1, list1, 4)
print('将列表作为元组的元素t = (1, list1, 4)\n\t', t)
# Mutating the nested list is visible through the tuple as well.
list1[1] = 99
print('修改列表内的元素list1[1] = 99\n\t', list1)
print('此时元组t的内容为:\n\t', t)
print('-' * 60)
|
def _filter_attribute(attr: str):
return not attr.startswith('_') and attr != 'to_dict'
class SysConfig:
    """Default application configuration; to_dict() exports the public keys."""
    # system
    DEBUG = False
    TESTING = False
    ENVIRONMENT = 'default'
    SECRET_KEY = ''
    JWT_SECRET = ''
    APP_NAME = ''
    # kernel
    AUTH_BACKENDS = []
    MAX_CONTENT_LENGTH = 1048576  # 1 MiB request-body cap
    ALLOWED_EXTENSIONS = ['png', 'jpg', 'jpeg', 'gif']
    LOG_FILE_NAME = 'logs/server.log'
    LOG_FORMAT = ("\n%(levelname)s - %(asctime)s - in %(module)s "
                  "[%(pathname)s:%(lineno)d]:\n%(message)s\n")
    # database
    SQLALCHEMY_DATABASE_URI = None
    SQLALCHEMY_ECHO = True
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # redis
    REDIS_HOST = "localhost"
    REDIS_PORT = 6379
    REDIS_PASS = None
    REDIS_DB = 0
    # NOTE(review): name is misspelled ("DEFAUTL"); kept as-is because
    # config keys are looked up by exact attribute name.
    REDIS_DEFAUTL_EX = None

    def to_dict(self) -> dict:
        """Return every public attribute (to_dict itself excluded)."""
        return {k: getattr(self, k) for k in dir(self) if _filter_attribute(k)}
|
import click
import colorama
from sdcli.src.lib import info
from sdcli.src.lib import generator as creator
from sdcli.src.lib.error import print_output_error
# Root command group for the sdcli tool.
@click.group()
def cli():
    # NOTE: this docstring is click's user-visible help text; left verbatim.
    '''
    CLI create by Streamelopers for generate our configuration on OBS.
    '''
    pass
@cli.command(help='For generate new config file.')
# @click.option('--event', prompt='Type of event: ', help='Specificate type of event.')
@click.option('--name', '-n', nargs=1, default='speaking', required=True,
              prompt='Name of Scenes Collection', help='Name Scnenes Collection')
def generate(name):
    # Delegate to the generator module; surface failures as a friendly
    # CLI error message instead of a raw traceback.
    try:
        creator._generator(name)
    except Exception as ex:
        print_output_error(message='Error while creating config file...', ex=str(ex))
@cli.command(help='Print information of our social medias.')
def information():
    # Thin wrapper over the info module's social-media printout.
    info._info()
@cli.command(help='Print version of SGen.')
def version():
    # Thin wrapper over the info module's version printout.
    info.version()
|
# -*- coding: utf-8 -*-
from heapq import heappop, heappush
from typing import List
class MaxHeap:
    """Max-heap built on heapq by storing (-value, value) pairs."""

    def __init__(self):
        self.count = 0
        self.els = []

    def __len__(self):
        return self.count

    def _max(self):
        """Peek at the largest element without removing it."""
        return self.els[0][1]

    def pop(self):
        """Remove and return the largest element."""
        self.count -= 1
        return heappop(self.els)[1]

    def push(self, el):
        """Insert *el*; the negated key keeps heapq's min-order correct."""
        self.count += 1
        heappush(self.els, (-el, el))
class Solution:
    def minimumCost(self, cost: List[int]) -> int:
        """Total cost of buying all candies when every third candy (in
        descending price order) is free.

        Equivalent to the original MaxHeap loop — pay for the two most
        expensive remaining candies, take the third free — but uses a
        single sort instead of the hand-rolled heap wrapper.
        """
        ordered = sorted(cost, reverse=True)
        # Indices 2, 5, 8, ... of the descending order are the free candies.
        return sum(c for i, c in enumerate(ordered) if i % 3 != 2)
if __name__ == "__main__":
    # Smoke tests using the problem's published examples.
    solution = Solution()
    assert 5 == solution.minimumCost([1, 2, 3])
    assert 23 == solution.minimumCost([6, 5, 7, 9, 2, 2])
    assert 10 == solution.minimumCost([5, 5])
|
#!/usr/bin/env python3
import binascii as ba
import os
import socketserver
import sys

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import dsa
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import *
def generate_shared_key():
    # NOTE(review): despite the name, this generates a throwaway 2048-bit
    # RSA key pair and returns only the PEM-encoded *public* half — no
    # secret is actually shared. Confirm the intended key-exchange design.
    return rsa.generate_private_key(65537,2048,default_backend()).public_key().public_bytes(Encoding.PEM,PublicFormat.SubjectPublicKeyInfo)
def generate_rsa_prvkey():
    """Generate a fresh 2048-bit RSA private key (public exponent 65537)."""
    return rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=default_backend(),
    )
def generate_rsa_pubkey(private_key):
    """Return the public half of *private_key*."""
    return private_key.public_key()
def signing(private_key, message):
    """Sign *message* with RSA-PSS (MGF1/SHA-256, maximum salt length)."""
    pss_padding = padding.PSS(
        mgf=padding.MGF1(hashes.SHA256()),
        salt_length=padding.PSS.MAX_LENGTH,
    )
    return private_key.sign(message, pss_padding, hashes.SHA256())
def signature_verification(public_key, signature, message):
    """Return True iff *signature* is a valid RSA-PSS signature of *message*.

    BUG FIX: ``public_key.verify()`` returns None on success and raises
    ``InvalidSignature`` on failure, so the original ``else: return False``
    branch was unreachable and a bad signature crashed the caller. The
    exception is now caught and mapped to False.
    """
    pss_padding = padding.PSS(
        mgf=padding.MGF1(hashes.SHA256()),
        salt_length=padding.PSS.MAX_LENGTH,
    )
    try:
        public_key.verify(signature, message, pss_padding, hashes.SHA256())
        return True
    except InvalidSignature:
        return False
def encrypt_data(public_key, message):
    """Encrypt *message* with RSA-OAEP (MGF1/SHA-256, no label)."""
    oaep_padding = padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None,
    )
    return public_key.encrypt(message, oaep_padding)
def decrypt_data(private_key, ciphertext):
    """Decrypt an RSA-OAEP (MGF1/SHA-256, no label) ciphertext."""
    oaep_padding = padding.OAEP(
        mgf=padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None,
    )
    return private_key.decrypt(ciphertext, oaep_padding)
class server_handler(socketserver.BaseRequestHandler):
    """Per-connection handler implementing a toy handshake + encrypted
    file-listing protocol:

        client: b'Hello'                      -> server: b'Hey There'
        client: b'Public Key?'                -> server: its RSA public key
        client: b'Client public key:<PEM>'    -> server: b'Shared Key:<PEM>'
        client: b'Initiation Process Complete'-> request loop starts
    """

    def __init__(self, request, client_address, server):
        # Generate this connection's key pair before delegating to
        # BaseRequestHandler.__init__, which invokes handle().
        self.private_key = generate_rsa_prvkey()
        self.state = 0
        socketserver.BaseRequestHandler.__init__(self, request, client_address, server)

    def handle(self):
        # --- state 0: plain-text greeting ---
        self.data = self.request.recv(3072).strip()
        if self.state == 0 and self.data == b'Hello':
            self.state = 1
            print(self.data, self.state)
            response = b'Hey There'
            self.request.sendall(response)
        else:
            response = b'Wrong Message'
            self.request.sendall(response)
            return
        # --- state 1: hand out our public key ---
        self.data = self.request.recv(3072).strip()
        if self.state == 1 and self.data == b'Public Key?':
            self.state = 2
            print(self.data, self.state)
            public_key = generate_rsa_pubkey(self.private_key).public_bytes(Encoding.PEM,PublicFormat.SubjectPublicKeyInfo)
            response = b'Server public key:' + public_key
            self.request.sendall(response)
        else:
            response = b'I do not understand you'
            self.request.sendall(response)
            return
        # --- state 2: receive the client key, send the "shared" key ---
        self.data = self.request.recv(3072).strip()
        if self.state == 2 and bytearray(self.data)[0:18] == b'Client public key:':
            client_pubkey = load_pem_public_key(bytes(bytearray(self.data)[18:]), default_backend())
            #print(client_pubkey)
            shared_key = generate_shared_key()
            #shared_key_client = encrypt_data(client_pubkey,shared_key)
            #print(len(shared_key))
            #print("\n\n\n")
            #signature = signing(self.private_key,shared_key)
            response = b'Shared Key:'+ shared_key
            self.request.sendall(response)
            self.data = self.request.recv(3072).strip()
            print(self.data)
            if bytearray(self.data) == b'Initiation Process Complete':
                # Request loop: each b'Request:<ciphertext>' is decrypted
                # with our private key; only b'list' is implemented.
                while(True):
                    self.data = self.request.recv(3072).strip()
                    response = b'Ciphertext Received'
                    self.request.sendall(response)
                    print(self.data)
                    if bytearray(self.data)[0:8] == b'Request:':
                        ciphertext = bytes(bytearray(self.data)[8:])
                        #ciphertext = ciphertext.decode("utf-8")
                        plaintext = decrypt_data(self.private_key,ciphertext)
                        print(plaintext)
                        if plaintext == b'list':
                            # Directory listing, encrypted for the client
                            # and signed with our private key.
                            directory = os.listdir("/root/ftp/")
                            directory = bytes('\n'.join(directory),'utf-8')
                            ciphertext = encrypt_data(client_pubkey,directory)
                            signature_server = signing(self.private_key,directory)
                            self.data = self.request.recv(3072).strip()
                            if bytearray(self.data)[0:10] == b'Signature:':
                                signature = bytes(bytearray(self.data)[10:])
                                # NOTE(review): verifies the client's signature
                                # over the decrypted request (b'list').
                                if signature_verification(client_pubkey,signature,plaintext):
                                    response = b'Response:' + ciphertext
                                    self.request.sendall(response)
                                    self.data = self.request.recv(3072).strip()
                                    print(self.data)
                                    response = b'Signature:' + signature_server
                                    self.request.sendall(response)
                                else:
                                    response = b"Response:can't verify the sender"
                                    self.request.sendall(response)
                            else:
                                pass
                    else:
                        break
        #response = b'Signature:' + signature
        #self.request.sendall(response)
        # Reset so a handler reused for another exchange starts over.
        self.state = 0
        return
def main():
    """Run the key-exchange TCP server on port 7777 until interrupted."""
    host, port = '', 7777
    server_side = socketserver.TCPServer((host, port), server_handler)
    try:
        server_side.serve_forever()
    except KeyboardInterrupt:
        # BUG FIX: BaseServer.shutdown() takes no arguments — the original
        # shutdown(0) raised TypeError. Also close the listening socket.
        server_side.shutdown()
        server_side.server_close()
        sys.exit(0)
# Run the server only when executed as a script, so importing this
# module (e.g. for its crypto helpers) does not bind port 7777.
if __name__ == '__main__':
    main()
|
from web.services.notion_service.read import get_all_documents
|
import paho.mqtt.client as mqtt
import pymysql
import json
import threading
import time
import datetime
# Durable-session MQTT client (clean_session=False keeps queued QoS>=1
# messages for this client id across reconnects).
mqttc = mqtt.Client("subscriber_uji", clean_session=False)
class connectDB:
    """Holds one RFID reading plus publish- and subscribe-side timestamps.

    NOTE(review): despite its name, this class does no database work in
    the visible code — it only stores and prints the measurement.
    """

    def __init__(self, id_rfid, pub_waktu_ctime, pub_waktu_datetime, sub_waktu_ctime, sub_waktu_datetime):
        self.id_rfid = id_rfid
        self.pub_waktu_ctime = pub_waktu_ctime
        self.pub_waktu_datetime = pub_waktu_datetime
        self.sub_waktu_ctime = sub_waktu_ctime
        self.sub_waktu_datetime = sub_waktu_datetime

    def getData(self):
        """Print the stored reading in the original report layout."""
        print("ID RFID : " + self.id_rfid)
        print("Waktu menggunakan ctime")
        print("Waktu di publisher : " + self.pub_waktu_ctime)
        print("Waktu di subscriber : " + self.sub_waktu_ctime)
        print("Waktu menggunakan datetime")
        print("Waktu di publisher : " + self.pub_waktu_datetime)
        print("Waktu di subscriber : " + self.sub_waktu_datetime)
        print("--------------------------------------------")
def pesan_masuk(mqttc, obj, msg):
    """MQTT on_message callback: decode the JSON payload and print it
    together with subscriber-side receive timestamps."""
    payload = json.loads(msg.payload.decode('utf-8'))  # JSON load (Python 3)
    # Subscriber-side timestamps, taken on arrival.
    sub_waktu_ctime = str(time.ctime())
    now = datetime.datetime.now()
    sub_waktu_datetime = '{} : {}'.format(now.second, now.microsecond)
    reading = connectDB(
        str(payload["id_rfid"]),
        payload["waktu_ctime"],
        payload["waktu_datetime"],
        sub_waktu_ctime,
        sub_waktu_datetime,
    )
    reading.getData()
mqttc.on_message = pesan_masuk
# Connect to the local MQTT broker.
ip_broker = "127.0.0.1"  # broker IP
port = 1883
mqttc.connect(ip_broker, port)
def subscribing(topic, qos):
    """Subscribe the module-level client to *topic* at the given QoS.

    Cleanup: the original re-bound its own parameters (``topic = topic``,
    ``qos = qos``), which were no-ops and have been removed.
    """
    mqttc.subscribe(topic, qos=qos)
    print("Subscriber ready")
    print("=======================================")
try:
    topic = "rfid/readrfid/position/pengujian"
    qos = 1
    # BUG FIX: the original passed target=subscribing(topic, qos), which
    # calls the function immediately and hands its None result to Thread,
    # so no thread was ever created or started. Pass the callable plus
    # args and actually start it.
    t = threading.Thread(target=subscribing, args=(topic, qos))
    t.start()
except Exception:
    # Narrowed from a bare except so KeyboardInterrupt still propagates.
    print("Error thread")
mqttc.loop_forever()
|
#!/usr/bin/python
import sys
import subprocess
import pandas as pd
import csv
# Label each mutation as stabilising (1) or destabilising (-1) from the
# ddG value in column 11, then append the labels as a 'class' column.
dataset = open('dataset_S350.csv', 'r')
test_file = csv.reader(dataset, delimiter=',')
classes = []
header_skipped = False
for line in test_file:
    if not header_skipped:
        # Skip the CSV header row.
        header_skipped = True
        continue
    classes.append(1 if float(line[11]) >= 0 else -1)
print(len(classes))
f = pd.read_csv('dataset_S350.csv')
f['class'] = classes
f.to_csv('dataset_S350.csv')
"""
pdb_id = ''
wild_type = ''
mutant = ''
position = 0
chain = ''
predicted_class = []
i = 0
j = 0
for record in test_file:
if i==0:
i = 1
continue
pdb_id = record[0]
chain_id = record[0]
pdb_id = pdb_id[0:-1]
wild_type = record[1]
position = record[2]
mutant = record[3]
chain = chain_id[-1]
predicted_value = subprocess.check_output(['./stability_predictor.py', '%s' %pdb_id, '%s' %chain , '%s' %position , '%s' %wild_type, '%s' %mutant])
j +=1
predicted_class.append(predicted_value)
print('Protein' + str(j) + 'done')
save_file = pd.read_csv('dataset_S350.csv')
save_file['predicted3'] = predicted_class
save_file.to_csv('dataset_S350.csv')
""" |
import json
import math
import os
import cv2
from PIL import Image
import numpy as np
from keras import layers
from keras.applications import DenseNet121
from keras.applications import inception_resnet_v2
from keras.callbacks import Callback, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, accuracy_score
from sklearn.metrics import classification_report, confusion_matrix
import scipy
import tensorflow as tf
#from tqdm import tqdm
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Flatten
from keras import layers
import keras
import os
#os.environ["CUDA_VISIBLE_DEVICES"]="1,2"
import tensorflow as tf
tf.device('/cpu:0')
#from network import *
from load_data import *
from preprocess import *
from model import *
from Inception_Resnet_model import *
from pre_process_test_images import *
from simple_model import *
# Fix RNG seeds for reproducible shuffling/augmentation.
np.random.seed(2019)
tf.set_random_seed(2019)
print("Called Loaddata")
train_df, test_df = load_data()
print("Called Preprocessing")
x_train, x_val, y_train, y_val = preprocess_data(train_df, test_df)
# Alternative backbones; only the simple model is active.
#model = build_model_Inception_Resnet() #For Densenet
#model = build_model_Inception_Resnet() #For Inception-Resnet
model = build_simple_model() #For simple model
model.summary()
BATCH_SIZE = 32
def create_datagen():
    """Augmentation generator: heavy zoom, both flips, constant-black fill."""
    augmenter = ImageDataGenerator(
        zoom_range=0.50,       # range for random zoom
        fill_mode='constant',  # fill points outside the input boundaries
        cval=0.,               # fill value used with fill_mode='constant'
        horizontal_flip=True,  # randomly flip images horizontally
        vertical_flip=True,    # randomly flip images vertically
    )
    return augmenter
# Stream augmented batches from the in-memory training arrays.
data_generator = create_datagen().flow(x_train, y_train, batch_size=BATCH_SIZE, seed=2019)
class Metrics(Callback):
    """Keras callback tracking quadratic-weighted Cohen's kappa on the
    validation data, checkpointing the model whenever kappa improves."""

    def on_train_begin(self, logs=None):
        self.val_kappas = []

    def on_epoch_end(self, epoch, logs=None):
        X_val, y_val = self.validation_data[:2]
        # Collapse the ordinal multi-hot encoding back to a class index.
        y_val = y_val.sum(axis=1) - 1
        y_pred = self.model.predict(X_val) > 0.5
        y_pred = y_pred.astype(int).sum(axis=1) - 1
        _val_kappa = cohen_kappa_score(
            y_val,
            y_pred,
            weights='quadratic'
        )
        self.val_kappas.append(_val_kappa)
        # BUG FIX: the original string lacked the f-prefix and printed the
        # literal "{_val_kappa:.4f}" template instead of the value.
        print("val_kappa: {:.4f}".format(_val_kappa))
        if _val_kappa == max(self.val_kappas):
            print("Validation Kappa has improved. Saving model.")
            self.model.save('model.h5')
        return
kappa_metrics = Metrics()
# Train on the augmented generator, validating on the held-out split.
history = model.fit_generator(
    data_generator,
    steps_per_epoch=x_train.shape[0] / BATCH_SIZE,
    epochs=10,
    validation_data=(x_val, y_val),
    callbacks=[kappa_metrics]
)
# Persist the training curves for later inspection.
with open('history.json', 'w') as f:
    json.dump(history.history, f)
history_df = pd.DataFrame(history.history)
history_df[['loss', 'val_loss']].plot()
history_df[['acc', 'val_acc']].plot()
plt.plot(kappa_metrics.val_kappas)
y_pred = model.predict(x_val)
print("0")
print(y_pred)
#print(accuracy_score(y_val, y_pred))
# y_test = model.predict(x_test) > 0.7
# y_test = y_test.astype(int).sum(axis=1) - 1
# test_df['diagnosis'] = y_test
# test_df.to_csv('submission.csv',index=False)
print(y_val)
print("1")
# Collapse each row to a single value by summing — presumably reversing a
# multi-hot ordinal encoding (TODO confirm against preprocess_data).
y_val_1d = np.ndarray(y_val.shape[0])
for i in range(y_val.shape[0]):
    y_val_1d[i] = y_val[i].sum()
y_pred_1d = np.ndarray(y_pred.shape[0])
for i in range(y_pred.shape[0]):
    y_pred_1d[i] = y_pred[i].sum()
print(confusion_matrix(y_val_1d, y_pred_1d))
# Evaluate on the IDRiD test labels with the same 0.5 threshold.
test_df = pd.read_csv('IDRiD_Disease Grading_Testing Labels.csv')
test_df.columns = ["id_code", "diagnosis", "discard"]
x_test, y_test = preprocess_test_data(test_df)
y_test_pred = model.predict(x_test) > 0.5
print(accuracy_score(y_test, y_test_pred))
print()
y_test_1d = np.ndarray(y_test.shape[0])
for i in range(y_test.shape[0]):
    y_test_1d[i] = y_test[i].sum()
y_test_pred_1d = np.ndarray(y_test_pred.shape[0])
for i in range(y_test_pred.shape[0]):
    y_test_pred_1d[i] = y_test_pred[i].sum()
print(confusion_matrix(y_test_1d, y_test_pred_1d))
from molecularfunctionsOOP import particleClass
# Set global simulation constants.
# NOTE: Python 2 print statements — this script requires Python 2.
Np=108          # number of particles
deltat=.004     # integration timestep
mass = 1
dens = 0.85     # density (presumably reduced LJ units — confirm)
temp = 0.9      # temperature (presumably reduced LJ units — confirm)
particles = particleClass(Np, dens, temp,mass)
particles.changeForces()
# Report conserved quantities before and after integrating.
print "Begin:"
particles.checkEnergy ()
particles.checkMomenta()
for i in range(10000):
    #print "i= ",i
    particles.update(deltat)
    #particles.checkEnergy ()
    #particles.checkMomenta()
print "End: "
particles.checkEnergy ()
particles.checkMomenta()
|
from . import stereo
|
#!/usr/bin/env python26
# -*- coding: utf-8 -*-
__author__="superwen"
__date__ ="$2013-08-07 17:19:10$"
from TvProgramBot.db.myStringFilter import getFilterTitle
# Test fixtures: [raw programme title, expected cleaned title].
# NOTE: Python 2 print statements — this script requires Python 2.
s = [
    ['雄风剧场:读心专家 17', '读心专家'],
    ['法治时空 1194', '法治时空'],
    ['海豚万家剧场:爱在旅途Ⅱ 20', '爱在旅途2'],
    ['前情提要《花木兰传奇》37', '花木兰传奇'],
    ['非常6+1(1)', '非常6+1'],
    ['电视剧:穿越烽火线 9', '穿越烽火线'],
    ['传奇 426', '传奇'],
    ['星光大道1/3', '星光大道'],
    ['海豚星光剧场:佳恩别哭 46', '佳恩别哭'],
    ['夜线60分', '夜线60分'],
    ['法治时空 1194', '法治时空'],
    ['2013一起聊聊241', '2013一起聊聊'],
    ['红娘子 43', '红娘子'],
    ['甄嬛传 4','甄嬛传'],
    ['转播中央台新闻联播','新闻联播'],
    ['笑动2013','笑动2013'],
    ['今晚80后脱口秀','今晚80后脱口秀'],
    ['《白领剧场》二集:爱似百汇11―12','爱似百汇'],
    ['英雄剧场:红娘子 22','红娘子'],
    ['动画片:熊出没 73','熊出没'],
    ['巴渝剧场:西游记 37','西游记'],
    ['深夜剧场:甄嬛传 48','甄嬛传'],
    ['东南剧苑:牵牛的夏天 13','牵牛的夏天'],
    ['蓝海影院:家有辣嫂','家有辣嫂'],
    ['雄关剧场:家常菜 6','家常菜'],
    ['花雨剧场:前妻的车站 14','前妻的车站'],
    ['神枪(19)','神枪'],
    ['汉字英雄(重播版)','汉字英雄'],
    ['阳光剧场:爱情公寓Ⅱ','爱情公寓2'],
    ['大王·小王(重播)','大王·小王'],
    ['重播:中国星事记','中国星事记'],
    ['情感剧场:爱情公寓精装版','爱情公寓'],
    ['梦剧场连续剧:血战到底','血战到底'],
    ['12点报道','12点报道'],
    ['黄河大剧院:甄嬛传30','甄嬛传']
]
if __name__ == "__main__":
    # Run the filter over every fixture: matches print "+++",
    # mismatches print the expected value for comparison.
    for ss in s:
        t,p = getFilterTitle(ss[0])
        if t == ss[1].strip():
            print t,"\t","+++"
        else:
            print t,"\t",ss[1]
#!/usr/bin/env python3
# Created by Marlon Poddalgoda
# Created on December 2020
# This program is an updated guessing game
import random
def main():
    """Play one round: compare the user's guess to a random digit 0-9."""
    print("Today we will play a guessing game.")
    # random number generation
    random_number = random.randint(0, 9)
    # input
    user_guess = input("Enter a number between 0-9: ")
    print("")
    # process
    try:
        user_guess_int = int(user_guess)
        if user_guess_int == random_number:
            # output
            print("Correct! {} was the right answer."
                  .format(random_number))
        else:
            # output
            print("Incorrect, {} was the right answer."
                  .format(random_number))
    except ValueError:
        # Only int() can raise here; catching bare Exception (as before)
        # would also have hidden unrelated bugs.
        print("That's not a number! Try again.")
    finally:
        # output
        print("")
        print("Thanks for playing!")
# Run the game only when executed as a script.
if __name__ == "__main__":
    main()
|
from sqlalchemy import *
from migrate import *
meta = MetaData()
# Bare table handle: only the column being migrated needs describing.
meetup_tbl = Table('meetup', meta)
meetupcom_id_col = Column('meetupcom_eventid', String, nullable=True)
def upgrade(migrate_engine):
    """Add the nullable meetupcom_eventid column to the meetup table."""
    meta.bind = migrate_engine
    meetup_tbl.create_column(meetupcom_id_col)
    # Cleanup: removed the dead trailing `pass`.
def downgrade(migrate_engine):
    """Drop the meetupcom_eventid column from the meetup table."""
    meta.bind = migrate_engine
    meetup_tbl.drop_column(meetupcom_id_col)
    # Cleanup: removed the dead trailing `pass`.
|
from django.urls import path
from . import views
#from .views import UserList, UserDetail
# Authentication routes: plain function views for register/login/logout,
# class-based views for the two registration variants.
urlpatterns = [
    #path('', views.index, name="index"),
    path('register', views.register, name='register'),
    path('register/visitor', views.visitor_register.as_view(), name='visitor_register'),
    path('register/staff', views.staff_register.as_view(), name='staff_register'),
    path('login', views.login_view, name='login'),
    path('logout', views.logout_view, name='logout')
    #path('', UserList.as_view()),
    #path('<int:pk>', UserDetail.as_view()),
]
|
#!venv/bin/python
import os
import sys
from io import BytesIO
from PIL import Image
from mutagen.mp3 import MP3
from mutagen.mp4 import MP4
# Minimum acceptable bitrate (kbps) per audio container.
MIN_KBPS = {".mp3": 320, ".m4a": 256}

walk_dir = sys.argv[1]
wrongBitRate = set()
# Walk the library and record (path, kbps) for every file below its
# container's minimum bitrate. The original duplicated the whole
# check per extension; the threshold map folds the two branches.
for root, subdirs, files in os.walk(walk_dir):
    for file_name in files:
        _, ext = os.path.splitext(file_name)
        min_kbps = MIN_KBPS.get(ext)
        if min_kbps is None:
            continue
        path = os.path.join(root, file_name)
        audio = MP3(path) if ext == ".mp3" else MP4(path)
        kbps = int(audio.info.bitrate) / 1000
        if kbps < min_kbps:
            wrongBitRate.add((path, kbps))
print("--------------------\n Incorrect Bitrate \n--------------------")
for entry in sorted(wrongBitRate):
    print(entry[0], "-", entry[1])
# Print the first two plus last two characters of the input string,
# or an empty string when it has fewer than three characters.
s = input('Nhap chuoi: ')
s_n = s[:2] + s[len(s) - 2:] if len(s) > 2 else ''
print(s_n)
|
import matplotlib as mpl
mpl.use('TkAgg')
import matplotlib.pyplot as plt
import geopandas as gpd
# import pysal as ps
import seaborn as sns
from pyproj import Proj, transform
from shapely.geometry import Point
import pandas as pd
import numpy
# from pysal.contrib.viz import mapping as maps
from classes.classDBOperations import DBOperations
# from shapely.ops import transform
# from functools import partial
# import pyproj
class createMaps:
    """Render Swiss (CH) listing locations from the DB on a PLZO shapefile,
    coloured by sqrt(price / size)."""

    def __init__(self, dbOperations=None):
        self.dbOperations = dbOperations

    def mapAllListingsCH(self, shapefile=None):
        """Query all listings inside the CH bounding box and plot them.

        BUG FIXES vs. the original:
        * the default shapefile was loaded at import time as a default
          argument; it is now loaded lazily on first use (same file).
        * ``row.size`` in the price-per-size lambda resolved to the pandas
          ``Series.size`` attribute (the number of fields in the row), not
          the ``size`` column; the column is now indexed explicitly.
        """
        if shapefile is None:
            shapefile = gpd.GeoDataFrame.from_file('datasets/PLZO_SHP_LV03/PLZO_OS.shp')
        locations = []
        if self.dbOperations is None:
            self.dbOperations = DBOperations.getDB()
        self.dbOperations.getConnection()
        try:
            with DBOperations.connection.cursor() as cursor:
                # Bounding box roughly covering Switzerland.
                sql = "SELECT * FROM `listingDetails` WHERE (latitude <= 47.829906 AND latitude >= 45.795286 AND longitude >= 5.855986 AND longitude <= 10.584407) "
                cursor.execute(sql)
                items = cursor.fetchall()
                for item in items:
                    if item["latitude"] is not None:
                        lat = float(item["latitude"])
                        long = float(item["longitude"])
                        if item['price'] is not None and item['size'] is not None:
                            price = float(item["price"])
                            size = int(item['size'])
                            locations.append({"latitude": lat, "longitude": long, "price": price, "size": size})
                df = pd.DataFrame(locations, columns=['latitude', 'longitude', "price", "size"])
                # Explicit column lookup: row.size is Series.size, not the column.
                df['pricePerSq'] = df.apply(lambda row: (row.price / row['size']) ** 0.5, axis=1)
                # Convert the shapefile to WGS84 so it matches the lat/lon points.
                shp = shapefile.to_crs(epsg=4326)
                # Build the geo dataframe from the point geometry.
                geometry = [Point(xy) for xy in zip(df.longitude, df.latitude)]
                df = df.drop(['longitude', 'latitude'], axis=1)
                gdf = gpd.GeoDataFrame(df, geometry=geometry)
                # Drop extreme values outside two standard deviations.
                elements = numpy.array(gdf['pricePerSq'])
                mean = numpy.mean(elements, axis=0)
                sd = numpy.std(elements, axis=0)
                gdf = gdf.loc[gdf['pricePerSq'] > (mean - 2 * sd)]
                gdf = gdf.loc[gdf['pricePerSq'] < (mean + 2 * sd)]
                # Create the plot: country outline plus coloured points.
                vmin, vmax = gdf['pricePerSq'].min(), gdf['pricePerSq'].max()
                print(gdf['pricePerSq'].max())
                cmap = sns.cubehelix_palette(as_cmap=True)
                base = shp.plot(figsize=(40, 20), linewidth=0.25, edgecolor='#999999', color='#ffffff')
                points = gdf.plot(markersize=20, column='pricePerSq', s='pricePerSq', k=100, cmap=cmap, legend=True, alpha=0.5, vmin=vmin, vmax=vmax, ax=base)
                fig = points.get_figure()
                cax = fig.add_axes([0, 5, 10, 20])
                sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
                sm._A = []
                fig.colorbar(sm, cax=cax)
        finally:
            # NOTE(review): if an exception occurs before the cursor is
            # created, this finally block raises NameError on `cursor`.
            print("create mapAllListingsCH SUCCESS")
            cursor.close()
            plt.show()
|
# k minutes are already spent out of 240; problem i takes 5*i minutes.
# Print the largest count of problems (solved in order 1..count) that
# still fits into the remaining time, or 0 if none fits.
n, k = map(int, input().split())
remaining_time = 240 - k
solved = 0
for count in range(n, 0, -1):
    # Total time for problems 1..count is the arithmetic series 5*c*(c+1)/2.
    if (5 * count * (count + 1)) / 2 <= remaining_time:
        solved = count
        break
print(solved)
|
#!/usr/bin/env python3
#
import os, sys, json
import argparse
from nltk.tokenize import sent_tokenize
from tqdm import tqdm
import pdb
class Preprocess(object):
    """Sentence-split the raw .txt files of one dataset split and dump the
    result as a JSON map of sentence ids to sentences."""

    def __init__(self, args):
        super(Preprocess, self).__init__()
        self.data_path = args.data_path
        self.mode = args.mode
        self.data_dir = os.path.join(self.data_path, self.mode)

    def sentenize(self):
        """Tokenize every .txt file in the split into sentences keyed by
        '<zero-padded file id>_<zero-padded running index>'."""
        self.processed_data = {}
        for file_name in tqdm(os.listdir(self.data_dir)):
            if not file_name.endswith('.txt'):
                continue
            file_path = os.path.join(self.data_dir, file_name)
            # File names are numeric ids, e.g. '123.txt'.
            file_id = int(file_name.split('.')[0])
            idx = -1
            with open(file_path) as file:
                for line in file:
                    if not line:
                        continue
                    for sent in sent_tokenize(line):
                        idx += 1
                        sent_id = '{:08d}_{:08d}'.format(file_id, idx)
                        self.processed_data[sent_id] = sent

    def save_file(self):
        """Write the sentence map as pretty-printed JSON next to the data."""
        self.target_file_path = os.path.join(self.data_dir, 'processed.json')
        with open(self.target_file_path, 'w') as target_file:
            json.dump(self.processed_data, target_file, indent=4)
def main():
    """CLI entry: sentence-split the chosen data split and save the JSON."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', default='train')
    parser.add_argument('--data_path', default='/home/wuqy1203/pg19/deepmind-gutenberg/')
    args = parser.parse_args()
    preprocess = Preprocess(args)
    preprocess.sentenize()
    preprocess.save_file()

if __name__ == '__main__':
    main()
|
class Stats(object):
    """Raw character stats from gear + race, with no multipliers applied.

    NOTE(review): the ``str`` parameter/attribute shadows the builtin;
    renaming would break keyword callers, so it is kept as-is.
    """
    # For the moment, lets define this as raw stats from gear + race; AP is
    # only AP bonuses from gear and level. Do not include multipliers like
    # Vitality and Sinister Calling; this is just raw stats. See calcs page
    # rows 1-9 from my WotLK spreadsheets to see how these are typically
    # defined, though the numbers will need to updated for level 85.
    def __init__(self, str, agi, ap, crit, hit, exp, haste, mastery, mh, oh, ranged, procs):
        # This will need to be adjusted if at any point we want to support
        # other classes, but this is probably the easiest way to do it for
        # the moment.
        self.str = str
        self.agi = agi
        self.ap = ap
        self.crit = crit
        self.hit = hit
        self.exp = exp
        self.haste = haste
        self.mastery = mastery
        # mh/oh/ranged are Weapon instances; procs is a Procs instance
        # (see the classes below).
        self.mh = mh
        self.oh = oh
        self.ranged = ranged
        self.procs = procs
class Weapon(object):
    """A weapon's damage/speed plus the normalization speed implied by
    its type flags (thrown > ranged > two-handed > dagger > default)."""

    def __init__(self, damage, speed, is_dagger=False, is_two_handed=False, is_thrown=False, is_ranged=False):
        self.average_damage = damage
        self.speed = speed
        self.is_dagger = is_dagger
        self.is_two_handed = is_two_handed
        self.is_thrown = is_thrown
        self.is_ranged = is_ranged
        # First matching flag wins, mirroring the original if/elif chain.
        for flag, norm_speed in ((is_thrown, 2.1), (is_ranged, 2.8),
                                 (is_two_handed, 3.3), (is_dagger, 1.7)):
            if flag:
                self._normalization_speed = norm_speed
                break
        else:
            self._normalization_speed = 2.4

    def damage(self, ap=0):
        """Average hit damage including attack power."""
        return self.average_damage + self.speed * ap/14.

    def normalized_damage(self, ap=0):
        """Average hit damage using the type-normalized speed."""
        return self.average_damage + self._normalization_speed * ap/14.

    def dps(self, ap=0):
        """Damage per second including attack power."""
        return self.average_damage / self.speed + ap/14.
class Procs(object):
    """Flag-set of active procs/activated effects.

    Recognised names passed to the constructor read as True attributes;
    recognised-but-unset names read as False; anything else raises
    AttributeError. (The original notes this is a temporary dumping
    ground for metagems, set bonuses, racials, etc., pending a more
    robust proc system — that caveat still applies.)
    """

    allowed_procs = frozenset([
        'heroic_deaths_verdict',
        'heroic_sharpened_twilight_scale',
        'relentless_metagem',
        'chaotic_metagem'
    ])

    def __init__(self, *args):
        # Only recognised proc names become instance attributes.
        for proc in set(args) & self.allowed_procs:
            setattr(self, proc, True)

    def __getattr__(self, name):
        # Known-but-unset procs read as False; anything else is an error.
        if name in self.allowed_procs:
            return False
        object.__getattribute__(self, name)
|
from django.apps import AppConfig
class RouteCollectorConfig(AppConfig):
    """Django app registry entry for the route_collector app."""
    name = 'route_collector'
|
import numpy as np
import cv2
from keras.layers import Input
from keras.layers.convolutional import Conv2D
from keras.models import Model
from os.path import dirname as up_one_dir
from os import listdir
from os.path import isfile, join, abspath
def create_model(img, img_txt, dir_of_images, dir_save_to):
    """Fit a single bias-free 3x3 convolution mapping one grayscale image
    onto its target response (loaded from *img_txt*), then save the
    model's prediction as a normalized grayscale JPEG in *dir_save_to*.
    """
    # NOTE(review): plain concatenation assumes dir_of_images ends with a
    # path separator — confirm against the callers.
    img_path = dir_of_images + img
    img_txt_path = dir_of_images + img_txt
    sample_inp = cv2.imread(img_path,cv2.IMREAD_GRAYSCALE)
    sample_out = np.loadtxt(img_txt_path, dtype=np.float32)
    rows,cols = sample_inp.shape
    # Add the trailing channel axis expected by Conv2D.
    sample_inp = np.array(sample_inp).reshape((rows,cols,1))
    sample_out = np.array(sample_out).reshape((rows,cols,1))
    sample_inp.shape   # no-op expression (notebook leftover)
    sample_out.shape   # no-op expression (notebook leftover)
    # Batch dimension of one: a single training example.
    samples_inp = np.array([sample_inp])
    samples_out = np.array([sample_out])
    inp = Input(shape=(None,None,1))
    out = Conv2D(1, (3, 3), kernel_initializer='normal', use_bias=False, padding='same')(inp)
    model = Model(inputs=inp, outputs=out)
    model.summary()
    # Compile the model; mse = mean squared error, the optimizer controls
    # how the weights are searched.
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mse', 'mae'])
    num_epochs = 100
    # NOTE(review): range(10,30,40) yields only [10], so exactly one fit
    # of 10 epochs runs — confirm whether an epoch sweep was intended.
    for i in range(10,30,40):
        num_epochs = i
        model.fit(samples_inp, samples_out, batch_size=1, epochs = num_epochs)
        model.layers[1].get_weights()   # no-op: result is discarded
        model.evaluate(samples_inp, samples_out, verbose=True)
        model.metrics_names             # no-op expression
        output_images = model.predict(samples_inp)
        # Rectify, stretch to 0-255 and cast for JPEG output.
        output_image = output_images[0].reshape((rows,cols))
        output_image = abs(output_image);
        output_image = cv2.normalize(output_image,None,0,255,cv2.NORM_MINMAX)
        output_image = np.uint8(output_image)
        name = img[:-4] + str(num_epochs) + '_grayscale.jpg'
        print('saving in ', join(dir_save_to, name))
        print(name)
        print(dir_save_to)
        cv2.imwrite(join(dir_save_to, name), output_image)
    #model.save('sobel.h5')
    #quit()
|
from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import os
import numpy as np
import time
import sys
import torch
from torch import nn
from torch.backends import cudnn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision.transforms import Resize
from reid import datasets
from reid import models
from reid.dist_metric import DistanceMetric
from reid.loss import TripletLoss
from reid.evaluators import Evaluator
from reid.utils.data import transforms as T
from reid.utils.data.preprocessor import Preprocessor
from reid.utils.data.sampler import RandomIdentitySampler
from reid.utils.logging import Logger
from reid.utils.serialization import load_checkpoint, save_checkpoint
from reid.evaluation_metrics import accuracy
from reid.utils.meters import AverageMeter
from reid.datasets import init_dataset
from reid.datasets.dataset_loader import ImageDataset
from IPython import embed
def main(args):
    """Build the training DataLoader for *args.dataset* and step through
    it, dropping into IPython after each batch (debug harness, not a
    training loop)."""
    dataset = init_dataset(args.dataset, args.data_dir)
    # ImageNet channel statistics.
    normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    num_classes = dataset.num_train_pids  # NOTE(review): unused here
    train_transformer = T.Compose([
        Resize((256, 256)),
        T.RandomSizedRectCrop(256, 256),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalizer,
        T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
    ])
    train_set = ImageDataset(dataset.train, train_transformer)
    # Identity-balanced batches; shuffling is delegated to the sampler.
    train_loader = DataLoader(
        train_set, batch_size=args.batch_size,
        sampler=RandomIdentitySampler(train_set, args.num_instances), shuffle=False,
        num_workers=args.workers, pin_memory=True, drop_last=True
    )
    for i, inputs in enumerate(train_loader):
        print('iter',i)
        embed()
if __name__ == '__main__':
    # Command-line configuration; only the data options are consumed by
    # main() above, the rest are kept for the full training script.
    parser = argparse.ArgumentParser(description="Triplet loss classification")
    # data
    parser.add_argument('-d', '--dataset', type=str, default='market1501',
                        choices=datasets.get_names())
    parser.add_argument('-b', '--batch_size', type=int, default=128)
    parser.add_argument('-j', '--workers', type=int, default=4)
    parser.add_argument('--split', type=int, default=0)
    parser.add_argument('--height', type=int,
                        help="input height, default: 256 for resnet*, "
                             "144 for inception")
    parser.add_argument('--width', type=int,
                        help="input width, default: 128 for resnet*, "
                             "56 for inception")
    parser.add_argument('--combine-trainval', action='store_true',
                        help="train and val sets together for training, "
                             "val set alone for validation")
    parser.add_argument('--num-instances', type=int, default=8,
                        help="each minibatch consist of "
                             "(batch_size // num_instances) identities, and "
                             "each identity has num_instances instances, "
                             "default: 4")
    # model
    parser.add_argument('-a', '--arch', type=str, default='resnet50',
                        choices=models.names())
    parser.add_argument('--features', type=int, default=128)
    parser.add_argument('--dropout', type=float, default=0)
    # loss
    parser.add_argument('--margin', type=float, default=0.5,
                        help="margin of the triplet loss, default: 0.5")
    # optimizer
    parser.add_argument('--lr', type=float, default=0.0003,
                        help="learning rate of all parameters")
    parser.add_argument('--weight-decay', type=float, default=5e-4)
    # training configs
    parser.add_argument('--resume', type=str, default='', metavar='PATH')
    parser.add_argument('--evaluate', action='store_true',
                        help="evaluation only")
    parser.add_argument('--epochs', type=int, default=150)
    parser.add_argument('--start_save', type=int, default=0,
                        help="start saving checkpoints after specific epoch")
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--print_freq', type=int, default=10)
    # metric learning
    parser.add_argument('--dist_metric', type=str, default='euclidean',
                        choices=['euclidean', 'kissme'])
    # misc
    working_dir = osp.dirname(osp.abspath(__file__))
    parser.add_argument('--data_dir', type=str, metavar='PATH',
                        default=osp.join(working_dir, 'data'))
    parser.add_argument('--logs_dir', type=str, metavar='PATH',
                        default=osp.join(working_dir, 'logs'))
    main(parser.parse_args())
class NeweggDL:
    """Stubbed Newegg listing client that returns canned data.

    ``check_for_listings`` ignores its keyword and always reports the same
    single hard-coded GPU listing.
    """

    # Placeholder for a Newegg API application id (unused by the stub).
    APP_ID = ''

    def check_for_listings(self, keyword):
        """Return a list of listing dicts matching *keyword* (stub: one fixed hit)."""
        listing = {
            "title": "MSI AMD Radeon RX 480 Armor 8GB OC Video Card GPU",
            "url": "https://www.newegg.com/Product/Product.aspx?Item=9SIADFR7C82795&cm_re=rx_480-_-9SIADFR7C82795-_-Product",
            "price": 329.99,
            "store_name": "newegg",
        }
        return [listing]
#JTSK-350112
# student.py
# Taiyr Begeyev
# t.begeyev@jacobs-university.de
"""
File: student.py
Resources to manage a student's name and test scores.
"""
class Student(object):
    """A student with a name and a fixed-length list of test scores."""

    def __init__(self, name, number):
        """Create a student whose `number` scores all start at 0."""
        # Trace every construction (behaviour kept from the original).
        print("Constructor being called")
        self._name = name
        self._scores = [0 for _ in range(number)]

    def getName(self):
        """Return the student's name."""
        return self._name

    def setScore(self, i, score):
        """Overwrite the ith score, 1-based."""
        self._scores[i - 1] = score

    def getScore(self, i):
        """Return the ith score, 1-based."""
        return self._scores[i - 1]

    def getAverage(self):
        """Return the mean of all scores."""
        return sum(self._scores) / len(self._scores)

    def getHighScore(self):
        """Return the largest score."""
        return max(self._scores)

    def __str__(self):
        """Render as 'Name: <name>' followed by space-separated scores."""
        scores_text = " ".join(str(score) for score in self._scores)
        return "Name: " + self._name + "\nScores: " + scores_text
# main program
# Smoke test for the Student class: construct three students, set two quiz
# scores on each, and print their string representations.
# create three students
student1 = Student("Jenny", 2)
student2 = Student("Steve", 2)
student3 = Student("Celine", 2)
# insert the grades for first 2 quizzes(95 and 90)
student1.setScore(1, 95)
student2.setScore(1, 95)
student3.setScore(1, 95)
student1.setScore(2, 90)
student2.setScore(2, 90)
student3.setScore(2, 90)
# print each student's name and scores via Student.__str__
print(student1)
print(student2)
print(student3)
# coding=utf-8
""""" 运行 “.” (当前)目录下的所有测试用例,并生成HTML测试报告 """""
import unittest
from src.lib import HTMLTestReportCN
class RunAllTests(object):
    """Discover all unittest cases under the current directory and render an
    HTML report with HTMLTestReportCN. Report title/description strings are
    user-facing (Chinese) and intentionally left untranslated."""
    def __init__(self):
        self.test_case_path = "."
        self.title = "自动化测试报告"
        self.description = "测试报告"
    def run(self):
        """Discover and run the suite, writing the HTML report to disk."""
        test_suite = unittest.TestLoader().discover(self.test_case_path)
        # Create the report directory at test start and derive the report file name.
        report_dir = HTMLTestReportCN.ReportDirectory()
        report_dir.create_dir(title=self.title)
        report_path = HTMLTestReportCN.GlobalMsg.get_value("report_path")
        fp = open(report_path, "wb")
        # NOTE(review): input() makes this interactive (prompts for the
        # tester's name) -- unsuitable for CI as-is.
        runner = HTMLTestReportCN.HTMLTestRunner(stream=fp, title=self.title, description=self.description, tester=input("请输入你的名字:"))
        runner.run(test_suite)
        fp.close()
if __name__ == "__main__":
    RunAllTests().run()
from openerp.osv import fields, osv
import logging
from logging import getLogger
_logger = getLogger(__name__)
class claim_type(osv.osv):
    """OpenERP model storing one claim-type record per patient.

    create() behaves as an upsert: if a record already exists for the given
    patient, it is updated in place instead of creating a duplicate.
    """
    _name = 'claim.type'
    _description = "Type of program"
    _columns = {
        'claim_type': fields.selection([('1', 'Sickle Cell'), ('2', 'Bed Grant')], 'Claim Type'),
        'erp_patient_id':fields.many2one('res.partner','ERP Patient Id',required=True),
    }
    def create(self, cr, uid, values, context=None):
        """Upsert: update the existing record for this patient, else create one.

        Returns the id of the updated/created record.
        """
        # Look for an existing record for the same patient (at most one).
        res=self.pool.get('claim.type').search(cr, uid, [('erp_patient_id', '=', values['erp_patient_id'])], limit=1)
        if len(res)>0:
            # NOTE(review): super(osv.osv, self).write skips both this model's
            # and osv.osv's own write() override -- confirm that bypassing the
            # normal write chain is intentional.
            super(osv.osv, self).write(cr, uid,res, values, context)
            erp_patient = res[0];
        else :
            # No existing record: fall through to the standard create.
            erp_patient = osv.Model.create(self,cr, uid, values, context)
        return erp_patient
# Instantiation registers the model with OpenERP (legacy 6.x idiom).
claim_type()
|
#!usr/bin/env python
# -*- coding: utf-8 -*-
"""
Model class for all entries
"""
import src.DB.DAL as DAL
# governing class for all entries. this is a dict
class Model(dict):
    """Dict-backed base class for DB-mapped entries.

    Subclasses set `table`, `fields` and `index`. Each field is held twice:
    the plain key is the working value, and a '_'-prefixed shadow key keeps
    the value as loaded from the database (a copy the user does not modify).
    """
    table = None    # DB table name, set by subclass
    fields = None   # ordered column names, set by subclass
    index = None    # index/primary-key info, set by subclass
    # Collect key/value pairs from **args.
    def __init__(self, **args):
        super(Model, self).__init__(**args)
        # Keep a pristine copy of every value under a '_'-prefixed key.
        for kw in args.keys():
            self['_' + kw] = args[kw]
    def __getattr__(self, key):
        # NOTE(review): missing keys raise KeyError here, not AttributeError,
        # which breaks hasattr()-style probing -- confirm this is acceptable.
        return self[key]
    def __setattr__(self, key, value):
        # NOTE(review): attribute assignment writes only the '_'-prefixed
        # shadow key, never the working key -- looks asymmetric; confirm.
        self['_' + key] = value
    def _query_dict(self):
        """Return only the pristine ('_'-prefixed) key/value pairs."""
        query_dict = {}
        for key in self.keys():
            if key[0] == '_':
                query_dict[key] = self[key]
        return query_dict
    def _working_dict(self):
        """Return only the working (non-'_'-prefixed) key/value pairs."""
        working_dict = {}
        for key in self.keys():
            if key[0] != '_':
                working_dict[key] = self[key]
        return working_dict
    # TODO: synchronization with the DB state
    def save(self):
        """Push this entry to the DB, then reset working values from the shadow copy."""
        DAL.update(self.__class__.table, **self)
        # NOTE(review): this copies the pristine '_key' value back over each
        # working key -- one would expect the opposite direction after an
        # update; confirm intent.
        for key in self.__class__.fields:
            self[key] = self['_' + key]
    @classmethod
    def get(cls, **args):
        """Select matching rows and return them as a list of cls instances."""
        selection = DAL.select_from(cls.table, **args)
        # Zip each row with the declared field names and build an instance.
        return [cls(**dict(zip(cls.fields, entry))) for entry in selection]
    @classmethod
    def add(cls, lst):
        """Insert a single entry or an iterable of entries."""
        if type(lst) == cls:
            DAL.insert_into(cls.table, **lst._working_dict())
        else:
            # Open one connection for the batch -- should this be transactional?
            with DAL.connection():
                for entry in lst:
                    DAL.insert_into(cls.table, **entry._working_dict())
    # remove() accepts either a single entry or a list of entries.
    @classmethod
    def remove(cls, lst):
        """Delete a single entry or an iterable of entries."""
        if type(lst) == cls:
            DAL.delete_from(cls.table, **lst._working_dict())
        else:
            with DAL.connection():
                for entry in lst:
                    DAL.delete_from(cls.table, **entry._working_dict())
|
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
class Fejkbiljett(App):
    """Kivy app with a single 'Generera' button."""
    def build(self):
        """Build and return the root layout containing the generate button."""
        gen_btn = Button(text='Generera',
                         size_hint=(.90, .10),
                         pos=(5, 5),
                         font_size=21)
        # Bug fix: the original bound the *unbound* StockholmTicket.getMessage,
        # so Kivy passed the Button instance as `self` (TypeError on Python 2,
        # wrong receiver on Python 3). Bind a lambda that swallows the
        # callback's widget argument and calls the method on a real instance.
        ticket = StockholmTicket()
        gen_btn.bind(on_press=lambda *args: ticket.getMessage())
        l = BoxLayout()
        l.add_widget(gen_btn)
        return l
class StockholmTicket(object):
    """Prints the ticket message when triggered."""
    def getMessage(self, *args):
        """Print the trace message.

        *args is accepted (and ignored) so the method can be used directly as
        a widget event callback, which passes the widget instance. The
        parenthesised print works on both Python 2 and 3 (the original used a
        Python-2-only print statement).
        """
        print("2. this is called on the method getMessage of StockholmTicket")
if __name__ == "__main__":
    # Launch the Kivy application event loop.
    Fejkbiljett().run()
|
from django.contrib import admin
# Register your models here.
from mezzanine.pages.admin import PageAdmin
from .models import Person, Project
# Expose Person and Project in the Django admin via Mezzanine's PageAdmin
# (assumes both models are Mezzanine Page subclasses -- TODO confirm).
admin.site.register(Person, PageAdmin)
admin.site.register(Project, PageAdmin)
from ._interface import SmqtkRepresentation
from .classification_element import ClassificationElement, \
get_classification_element_impls
from .data_element import DataElement, get_data_element_impls
from .data_set import DataSet, get_data_set_impls
from .descriptor_element import DescriptorElement, get_descriptor_element_impls
from .descriptor_index import DescriptorIndex, get_descriptor_index_impls
from .key_value import KeyValueStore, get_key_value_store_impls
from .classification_element_factory import ClassificationElementFactory
from .descriptor_element_factory import DescriptorElementFactory
|
import numpy as np
# Demo: building and reshaping NumPy arrays from Python lists.
singleDimArray = [1, 2, 3]
numpyArray = np.array(singleDimArray)
print("---Single Dimension Array---")
print(singleDimArray, type(singleDimArray))
print(numpyArray, type(numpyArray))
tenArray = np.arange(-10, 10)  # 20 integers: -10 .. 9
print("range:", tenArray)
print("zeros:", np.zeros((3, 4))) # Forms 3 rows x 4 columns multi dimensional zero array
print("linspace:", np.linspace(6, 24, 4)) # Splitting 6 to 24 into 4 parts
print("linspace:", np.linspace(0, 60, 11)) # Splitting 0 to 60 into 11 parts
twoDimArray = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
numpyMultiArray = np.array(twoDimArray)
print("\n---Two and Multi Dimension Array---")
print(twoDimArray, type(twoDimArray))
print(numpyMultiArray, type(numpyMultiArray))
mArray = np.zeros(8)
arr3d = mArray.reshape(2, 2, 2)
mRavel = arr3d.ravel()
print("2x2x2:\n", arr3d) # 3 Dimensional Array
print("Ravel:", mRavel)
print("4x2:", mArray.reshape(4, 2))
print("8x1:", mArray.reshape(8, 1))
print("5x4:", tenArray.reshape(5, 4))
print("2x10:", tenArray.reshape(2, 10))
toRavel = tenArray.reshape(10, 2)
print("10x2:", toRavel)
print("Ravel:", toRavel.ravel()) # Ravel flatens the multi dimensional array
print("\n---Unorganized Array---")
# Bug fix: a ragged nested list raises ValueError on NumPy >= 1.24 unless the
# object dtype is requested explicitly; this keeps the "unorganized" demo working.
print(np.array([[1, 2, 3], [4, 5]], dtype=object))
|
#!/usr/bin/env python
import collections
import functools
import webbrowser
import click
import requests
import termcolor
from . import client
from . import config
from . import filters
from . import output
from . import utils
DEFAULT_AGE_OF_ISSUES_TO_RESOLVE = 30 # days
DEFAULT_AGE_OF_ISSUES_TO_MARK_AS_SEEN = 7 # days
def with_title_filtering(fun):
    """Decorator adding the shared --include/--exclude title-filter options
    to a click command; the wrapped command receives them as `include` and
    `exclude` tuples of regex patterns."""
    @functools.wraps(fun)
    @click.option(
        '--include',
        '-f',
        multiple=True,
        help="Patterns of issue titles to include (Python regular expression)",
    )
    @click.option(
        '--exclude',
        '-e',
        multiple=True,
        help="Patterns of issue titles to exclude (Python regular expression)",
    )
    def decorated(*args, **kwargs):
        return fun(*args, **kwargs)
    return decorated
@click.group()
@click.option('--config-file', type=click.Path(exists=True))
@click.option('--site', default=None)
@click.pass_context
def main(ctx, config_file=None, site=None):
    # Root command group: load the configuration once and stash it as the
    # click context object so subcommands receive it via @click.pass_obj.
    # (Deliberately no docstring: click would surface it as --help text.)
    cfg = config.load_config(config_file, site)
    ctx.obj = cfg
@main.command(name='assigned-issues')
@click.pass_obj
def assigned_issues(cfg):
    """Show assigned issues"""
    # Iterate unresolved+assigned issues and print the tree-shaped listing.
    issues = client.opi_iterator(cfg, query='is:unresolved is:assigned')
    for line, _issue in output.tree_shaped(cfg, issues):
        print(line)
@main.command(name='assigned-issues-by-assignee')
@click.pass_obj
def assigned_issues_by_assignee(cfg):
    """Show issues by assignee"""
    issues = client.opi_iterator(cfg, query='is:unresolved is:assigned')
    # build Assignee -> issues table
    assigned = collections.defaultdict(list)
    for issue in issues:
        # Print a dot per fetched issue as a progress indicator.
        print('.', end='', flush=True)
        assigned[issue['assignedTo']['username']].append(issue)
    print()
    # Render one bullet per assignee, with their issues nested below.
    for assignee, issues in assigned.items():
        print('- {}'.format(assignee))
        for issue in issues:
            print('  - {}'.format(output.shaped_issue(cfg, issue)))
@main.command(name='browse-unseen-issues')
@click.option(
    '--age',
    default=DEFAULT_AGE_OF_ISSUES_TO_MARK_AS_SEEN,
    help="Age (in days) of to check",
    show_default=True,
)
@with_title_filtering
@click.pass_obj
def browse_unseen_issues(cfg, age, include, exclude):
    """Browse unseen issues"""
    # Show unseen issues in batches of 10, offering to open each batch in
    # browser tabs after confirmation.
    batch_size = 10
    msg_tpl = "Opening {} preceding unseen issues in browser? [Y/n | ^C to quit]"
    issues = client.opi_iterator(cfg, query='is:unresolved is:unassigned')
    issues = filters.unseen(filters.max_age(issues, days=age))
    issues = filters.filter_title(issues, include, exclude)
    # Note: the loop variable shadows `issues` with each batch of
    # (line, item) pairs from the grouper.
    for issues in utils.grouper(output.tree_shaped(cfg, issues), batch_size):
        print('-' * 120)
        for line, issue in issues:
            print(line)
        confirmation = utils.confirm(msg_tpl.format(len(issues)), 'yn', default='n')
        if confirmation == 'y':
            for _line, item in issues:
                if item:
                    # NOTE(review): `item.issue[...]` uses attribute access while
                    # other commands treat issues as plain dicts -- confirm the
                    # type yielded by utils.grouper/output.tree_shaped here.
                    webbrowser.open_new_tab(item.issue['permalink'])
@main.command(name='check-trends')
@click.option(
    '--period',
    default='12h', # FIXME: constant
    help="""Time period to compute trend stats""",
    show_default=True,
)
@click.option(
    '--threshold',
    default=1, # FIXME: constant
    type=click.FLOAT,
    help="Issues with a trend ratio below this threshold are ignored",
    show_default=True,
)
@with_title_filtering
@click.pass_obj
def check_trends(cfg, period, threshold, include, exclude):
    """Show evolution stats for seen issues"""
    # Decode e.g. "12h" into a stats-period token plus day/hour components.
    stats_period, days, hours = utils.decode_period(period)
    issues = client.opi_iterator(cfg, query='is:unresolved is:unassigned', statsPeriod=stats_period)
    issues = filters.max_age(filters.seen(issues), days=days, hours=hours)
    issues = filters.filter_title(issues, include, exclude)
    for line, issue in output.tree_shaped(cfg, issues):
        if not issue:
            # Structural/header lines carry no issue payload; print as-is.
            print(line)
            continue
        level, ratio, count = utils.compute_events_stats(
            stats_period,
            days or hours,
            threshold,
            issue,
        )
        if level < 1: # Don't bother with issue
            continue
        print(
            line,
            '\n   ',
            termcolor.colored(
                'New in period' if not ratio else 'Ratio {ratio:.01f}'.format(ratio=ratio),
                color='magenta' if not ratio else ('red' if level == 2 else 'yellow'),
            ),
            termcolor.colored(
                # Fix: message had a typo ("occurence") and an unbalanced
                # opening parenthesis.
                '({count} new occurrence(s))'.format(count=count),
                color='white', attrs=['bold']
            )
        )
@main.command(name='mark-as-seen')
@click.option(
    '--age',
    default=DEFAULT_AGE_OF_ISSUES_TO_MARK_AS_SEEN,
    help="Age (in days) of entries to mark seen",
    show_default=True,
)
@click.pass_obj
def mark_seen(cfg, age):
    """Mark issues as seen"""
    issues = client.opi_iterator(cfg, query='is:unresolved is:unassigned')
    # NOTE(review): `age` is passed positionally here while sibling commands
    # call filters.outdated(issues, days=age) -- confirm the signature matches.
    issues = filters.unseen(filters.outdated(issues, age))
    for line, issue in output.tree_shaped(cfg, issues):
        # run_command performs the API call and returns an annotated line.
        line, _issue = utils.run_command(client.mark_as_seen, cfg, issue, line)
        print(line)
@main.command(name='merge-issues')
@click.pass_obj
def merge_issues(cfg):
    """Merge related issues together"""
    groups = collections.defaultdict(list)
    # Collect issues and group them by (project, type, value, title, culprit):
    # identical tuples are assumed to be duplicates of the same defect.
    issues = client.opi_iterator(cfg)
    for line, issue in output.tree_shaped(cfg, issues):
        if issue:
            key = (
                issue['project']['slug'],
                issue['metadata'].get('type'),
                issue['metadata'].get('value'),
                issue['metadata'].get('title'),
                issue['culprit'],
            )
            groups[key].append(issue)
        print(line)
    # Merge issues: only groups with more than one member need merging.
    for issues in groups.values():
        if len(issues) <= 1:
            continue
        # NOTE(review): attribute access `issues[0].project['slug']` differs
        # from the dict access `issue['project']['slug']` above -- confirm
        # which style the issue objects actually support.
        project = issues[0].project['slug']
        print(project, ':: merging issues: ', [issue['id'] for issue in issues])
        try:
            client.merge_issues(cfg, issues)
        except requests.exceptions.ConnectionError as e:
            # Best-effort: report the failure and continue with other groups.
            print('ERROR, %s' % e)
@main.command(name='needs-triage')
@click.pass_obj
def needs_triage(cfg):
    """Show issues than needs triage"""
    # Unresolved + unassigned is treated as "needs triage".
    issues = client.opi_iterator(cfg, query='is:unresolved is:unassigned')
    for line, _issue in output.tree_shaped(cfg, issues):
        print(line)
@main.command(name='resolve-issues')
@click.option(
    '--age',
    default=DEFAULT_AGE_OF_ISSUES_TO_RESOLVE,
    help='Age (in days) of entries to resolve',
    show_default=True,
)
@click.pass_obj
def resolve_issues(cfg, age):
    """Resolve outdated issues"""
    issues = client.opi_iterator(cfg, query='is:unresolved is:unassigned')
    # Only issues older than `age` days are resolved.
    issues = filters.outdated(issues, days=age)
    for line, issue in output.tree_shaped(cfg, issues):
        line, _issue = utils.run_command(client.resolve_issue, cfg, issue, line)
        print(line)
@main.command(name='remove-issues')
@click.option(
    '--age',
    default=DEFAULT_AGE_OF_ISSUES_TO_RESOLVE,
    help='Age (in days) of entries to remove',
    show_default=True,
)
@click.pass_obj
def remove_issues(cfg, age):
    """
    Remove outdated issues.
    No action is performed on commented or assigned issues.
    """
    issues = client.opi_iterator(cfg, query='is:unresolved is:unassigned')
    issues = filters.outdated(issues, days=age)
    for line, issue in output.tree_shaped(cfg, issues):
        # Preserve issue with comment (None issue is for display purpose)
        # NOTE(review): `issue.numComments` is attribute access while other
        # commands index issues as dicts -- confirm the object type here.
        if issue is not None and issue.numComments == 0:
            line, _issue = utils.run_command(client.delete_issue, cfg, issue, line)
        print(line)
# Fix: the guard compared against '__main__.py', which is never the value of
# __name__, so running this file as a script silently did nothing.
if __name__ == '__main__':
    main()
|
from bs4 import BeautifulSoup
import uuid
from datetime import datetime, timedelta
class DataManager():
    """Extracts court-case rows from an HTML grid and stores them as
    Elasticsearch-style JSON bulk lines."""

    def create_data_file(self, elaborationDirectory, elaborationDate):
        """Open (append mode) the full-extraction data file for the given date."""
        dataFile = open(elaborationDirectory + 'full_' + elaborationDate + '_data' + '.json', 'a+')
        return dataFile

    def create_incremental_data_file(self, elaborationDirectory, elaborationDate):
        """Open (append mode) the incremental-extraction data file for the given date."""
        dataFile = open(elaborationDirectory + 'incr_' + elaborationDate + '_data' + '.json', 'a+')
        return dataFile

    def extract_cases(self, html, startUrl, elaborationDate):
        """Parse the #tablePerkaraAll grid in *html* into a list of case dicts.

        The first row is a header and is skipped; columns are identified by
        position (0 site number, 1 case id, 2 registration date, 3
        classification, 4 parties, 5 status, 6 duration, 7 details link).
        """
        extractedCases = []
        soup = BeautifulSoup(html, 'lxml')
        casesGrid = soup.find(id = 'tablePerkaraAll')
        rows = casesGrid.find_all('tr')
        rowPosition = 0
        for row in rows:
            if rowPosition != 0:
                columns = row.find_all('td')
                columnPosition = 0
                for column in columns:
                    if columnPosition == 0:
                        # Column 0 (site case number) is intentionally unused.
                        columnPosition +=1
                        continue
                    if columnPosition == 1:
                        caseIdentifier = column.get_text()
                        columnPosition +=1
                        continue
                    if columnPosition == 2:
                        rawRegistrationDate = column.get_text()
                        registrationDate = self.convert_registration_date(rawRegistrationDate)
                        columnPosition +=1
                        continue
                    if columnPosition == 3:
                        classification = column.get_text()
                        columnPosition +=1
                        continue
                    if columnPosition == 4:
                        allParties = column.get_text()
                        columnPosition +=1
                        continue
                    if columnPosition == 5:
                        status = column.get_text()
                        columnPosition +=1
                        continue
                    if columnPosition == 6:
                        # Column 6 (duration) is intentionally unused.
                        columnPosition +=1
                        continue
                    if columnPosition == 7:
                        linkAttribute = column.find('a', href=True)
                        detailsLink = linkAttribute['href']
                        columnPosition +=1
                        continue
                caseProperties = {
                    "Site": startUrl,
                    "CreationDate": elaborationDate,
                    "CaseID": caseIdentifier,
                    "RegistrationDate": registrationDate,
                    "Classification": classification,
                    "AllParties": allParties,
                    "Status": status,
                    "DetailsLink": detailsLink
                }
                extractedCases.append(caseProperties)
                rowPosition += 1
            else:
                # Header row: skip.
                rowPosition += 1
                continue
        return extractedCases

    # limit date should be provided as string in the format dd-mm-yyyy'
    def check_incremetal_cases(self, extractedCases, limitDate):
        """Drop cases registered before *limitDate*.

        Returns (incrementalCases, limitReached) where limitReached is True
        when at least one case was older than the limit. Bug fixes vs the
        original: limitReached is initialised before the loop (so it is not
        reset on every iteration and is defined even for an empty input).
        """
        limitDate = list(map(int, limitDate.split('-')))
        limitDate = datetime(limitDate[2],limitDate[1],limitDate[0])
        incrementalCases = list(extractedCases)
        limitReached = False
        for extractedCase in extractedCases:
            registrationDate = extractedCase['RegistrationDate'] # dd-mm-yyyy
            registrationDate = list(map(int, registrationDate.split('-')))
            registrationDate = datetime(registrationDate[2],registrationDate[1],registrationDate[0])
            if registrationDate < limitDate:
                incrementalCases.remove(extractedCase)
                limitReached = True
        return incrementalCases, limitReached

    def convert_registration_date(self, rawRegistrationDate):
        """Convert 'dd Mon yyyy' to 'dd-mm-yyyy'; return the input unchanged
        when it is empty or the month token is unrecognised."""
        if len(rawRegistrationDate) > 0:
            splittedDate = rawRegistrationDate.split()
            monthsDict = {'Jan' : '01', 'Feb' : '02',
                          'Mar' : '03', 'Apr' : '04',
                          'May' : '05', 'Jun' : '06',
                          'Jul' : '07', 'Aug' : '08',
                          'Sep' : '09', 'Oct' : '10',
                          'Nov' : '11', 'Dec' : '12'}
            month = monthsDict.get(splittedDate[1])
            if month is not None:
                return splittedDate[0] + '-' + month + '-' + splittedDate[2]
            return rawRegistrationDate
        else:
            return rawRegistrationDate

    def store_cases(self, dataFile, extractedCases):
        """Append each case as an Elasticsearch bulk pair (index action + doc)."""
        for extractedCase in extractedCases:
            uniqueCaseID = uuid.uuid1()
            uniqueCaseIDStr = '{"index":{"_id":"' + str(uniqueCaseID.int) + '"}}'
            extractedCaseString = str(extractedCase)
            # NOTE(review): naive quote replacement breaks on values that
            # contain apostrophes; json.dumps would be safer -- confirm.
            extractedCaseString = extractedCaseString.replace("'", '"')
            # Only prepend a newline when the file already has content.
            dataFile.seek(0)
            data = dataFile.read(10)
            if len(data) > 0:
                dataFile.write('\n')
            dataFile.write(uniqueCaseIDStr)
            dataFile.write('\n')
            dataFile.write(extractedCaseString)
"""
Invisible Objects
Vanilla Evennia does not allow true hidden objects by default.
The 'view' lock will prevent the object being displayed in a room's description
and stop the look command with "Could not view 'object(#9)'", whereas
attempting to look at a non-existent object returns 'Could not find '<object>''.
Likewise, the 'get' lock will disallow getting with "Could not get 'object(#9)'"
instead of 'Could not find '<object>''.
Both of these give away the existence of a hidden object.
This mixin modifies the object's hooks to fake the object not existing for
the following commands:
-Look
-give
SHORTCOMINGS:
-Because the hooks don't know the command given, they will always use the key
of the object, which may be different if an alias was used. Will also not
relay capitalisations of argument command like normal.
-All other commands, not specifically allowed for, will betray the hidden
object but they will hopefully happen less often.
"""
from evennia import DefaultObject
from evennia.utils import utils
# -----------------------------------------------------------------------------
# Ambient Message Storage
# -----------------------------------------------------------------------------
class RespectInvisibilityMixin():
    """
    A mixin to put on Character objects. It makes objects that fail the
    'visible' lock appear entirely absent ("Could not find ...") instead of
    leaking their existence via "Could not view/get ..." messages.
    """
    def at_object_creation(self):
        # Default both locks to false() so nobody passes them until the
        # builder grants access explicitly.
        self.locks.add("visible:false()")
        self.locks.add("view:false()")
    def at_look(self, target, **kwargs):
        """
        Called when this object performs a look. It allows to
        customize just what this means. It will not itself
        send any data.
        Args:
            target (Object): The target being looked at. This is
                commonly an object or the current location. It will
                be checked for the "view" type access.
            **kwargs (dict): Arbitrary, optional arguments for users
                overriding the call. This will be passed into
                return_appearance, get_display_name and at_desc but is not used
                by default.
        Returns:
            lookstring (str): A ready-processed look string
                potentially ready to return to the looker.
        """
        # Check visible lock, default locks gives away the hidden object.
        # Change to pretend no object was found from search.
        if not target.access(self, "visible", default=True):
            self.msg(f"Could not find '{target.key}'")
            return
        if not target.access(self, "view"):
            try:
                return "Could not view '%s'." % target.get_display_name(self, **kwargs)
            except AttributeError:
                # Target without get_display_name(): fall back to its key.
                return "Could not view '%s'." % target.key
        description = target.return_appearance(self, **kwargs)
        # the target's at_desc() method.
        # this must be the last reference to target so it may delete itself when acted on.
        target.at_desc(looker=self, **kwargs)
        return description
    def at_before_get(self, getter, **kwargs):
        """
        Called by the default `get` command before this object has been
        picked up.
        Args:
            getter (Object): The object about to get this object.
            **kwargs (dict): Arbitrary, optional arguments for users
                overriding the call (unused by default).
        Returns:
            shouldget (bool): If the object should be gotten or not.
        Notes:
            If this method returns False/None, the getting is cancelled
            before it is even started.
        """
        # Check visible lock, default locks gives away the hidden object.
        # Change to pretend no object was found from search.
        if not self.access(getter, "visible", default=True):
            getter.msg(f"Could not find '{self.key}'")
            return False
        return True
|
def swapsVarsInArray(array, i, j):
    """Swap the elements at indices i and j of array, in place."""
    # Tuple unpacking is the idiomatic Python swap; no temp variable needed.
    array[i], array[j] = array[j], array[i]
|
#!/usr/bin/env python
# coding: utf-8
# # Resnet
#
# ## Please watch Ng C4W2L01-C4W2L04, the first of which is found [here](https://www.youtube.com/watch?v=-bvTzZCEOdM&list=PLkDaE6sCZn6Gl29AoE31iwdVwSG-KnDzF&index=12).
#
# The convolutional neural network that we developed and ran was adequate for use on a small problem with a few classes, but it lacks the explanatory power to produce highly accurate results for more difficult datasets. Instead, more interesting neural networks have been developed which have greater explanatory power. One of the most powerful architectures today is called ResNet, which is short for residual network.
#
# In principle, you could take the network that you've been working on and make it more flexible by adding more convolutional layers, which is to say that we could add more sequences of feature map generation. This is what is meant when people use the term "deep" learning. However, if you did this, you would quickly run into the problem that your network would struggle to learn weights in the lower (closer to the inputs) layers of the network. This is a result of the way that neural networks are trained. In particular they rely on the ability to take the derivative of a misfit function (e.g. least squares) with respect to a parameter, and to adjust the weight based on that derivative. However in (naive) deep networks, this gradient has the tendency to become negligibly small as the impact of that weight gets lost in the myriad layers of convolutions and activations closer to the output.
#
# ResNet solves this problem by ensuring that the information in each weight gets propagated to the output. It does this by simply adding the layer's input to each layer's output, so instead of
# $$
# \mathbf{x}_{l+1} = \mathcal{F}_{l}(\mathbf{x}_l),
# $$
# at each layer, the neural network performs the operation
# $$
# \mathbf{x}_{l+1} = \mathcal{F}_{l}(\mathbf{x}_l) + \mathbf{x}_l.
# $$
# Rearranging this equation, we can see why this architecture is called a residual network:
# $$
# \mathbf{x}_{l+1} - \mathbf{x}_l = \mathcal{F}_{l}(\mathbf{x}_l).
# $$
# Each layer is modeling the residual between consecutive feature maps. The pedantic amongst us will note that this only works when the output of $\mathcal{F}_{l}(\mathbf{x}_l)$ is the same size as the input. This is dealt with by performing a suitable linear transformation on $\mathbf{x}_l$, making the equation
# $$
# \mathbf{x}_{l+1} = \mathcal{F}_{l}(\mathbf{x}_l) + W \mathbf{x}_l,
# $$
# where $W$ is a matrix that has learnable weights. The matrix $W$ is most often formulated as a convolution with a 1x1 kernel size.
#
# The addition of the input is known as a *skip connection* because it looks like this:
# <img src=res_net.svg width=600/>
# The input is run through a normal conv layer (perhaps several) and then added to the output, where it can then be maxpooled or run through an activation or whatever.
#
# Keras makes these sorts of networks pretty easy to program. To start with, let's apply this network to the CIFAR-10 classification problem, but we'll do it for all 10 classes. All the non-model definition code should look the same as our previous example.
# In[13]:
import keras
import keras.datasets as kd
# Load CIFAR-10 and scale pixel values from [0, 255] to [0, 1].
(x_train, y_train), (x_test, y_test) = kd.cifar10.load_data()
labels = ['airplane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
x_train = x_train/255.
x_test = x_test/255.
# Convert class vectors to binary class matrices (one-hot over N classes).
N = len(labels)
y_train = keras.utils.to_categorical(y_train, N)
y_test = keras.utils.to_categorical(y_test, N)
# Now things get more interesting. Obviously, ResNet as described above is more of a concept than a specific architecture: we'll need to make some more specific design choices. One good way of doing this is to look at the literature and copy what others have done. In particular, the [original ResNet Paper](https://arxiv.org/abs/1512.03385) provides an example of ResNet being applied to CIFAR-10 that yielded excellent accuracy (state of the art c. 2015). Here, we'll emulate their network architecture, which looks like this:
# <img src=cifar_10_res_net.svg width=900/>
# More concretely, the layers of this network up to (and including) the location of the star in the figure above, look like this.
# In[14]:
import keras.layers as kl
import keras.regularizers as kr
# Note the alternative method for model specification: no model.add(.), instead we
# perform sequential operations on layers, then we will make the resulting model later.
# Specify the shape of the input image
input_shape = x_train.shape[1:]
inputs = kl.Input(shape=input_shape)
# First convolution + BN + act
conv = kl.Conv2D(16,(3,3),padding='same',kernel_regularizer=kr.l2(1e-4))(inputs)
bn = kl.BatchNormalization()(conv)
act1 = kl.Activation('relu')(bn)
# Perform 3 convolution blocks; `act1` always carries the block's input so it
# can be added back as the skip connection.
for i in range(3):
    conv = kl.Conv2D(16,(3,3),padding='same',kernel_regularizer=kr.l2(1e-4))(act1)
    bn = kl.BatchNormalization()(conv)
    act = kl.Activation('relu')(bn)
    conv = kl.Conv2D(16,(3,3),padding='same',kernel_regularizer=kr.l2(1e-4))(act)
    bn = kl.BatchNormalization()(conv)
    # Skip layer addition
    skip = kl.add([act1,bn])
    act1 = kl.Activation('relu')(skip)
# Downsampling with strided convolution (16 -> 32 filters, spatial /2)
conv = kl.Conv2D(32,(3,3),padding='same',strides=2,kernel_regularizer=kr.l2(1e-4))(act1)
bn = kl.BatchNormalization()(conv)
act = kl.Activation('relu')(bn)
conv = kl.Conv2D(32,(3,3),padding='same',kernel_regularizer=kr.l2(1e-4))(act)
bn = kl.BatchNormalization()(conv)
# Downsampling with strided 1x1 convolution (the learned W in x + Wx)
act1_downsampled = kl.Conv2D(32,(1,1),padding='same',strides=2,kernel_regularizer=kr.l2(1e-4))(act1)
# Downsampling skip layer
skip_downsampled = kl.add([act1_downsampled,bn])
act1 = kl.Activation('relu')(skip_downsampled)
# Two more residual blocks at 32 filters.
for _ in range(2):
    conv = kl.Conv2D(32, (3, 3), padding="same", kernel_regularizer=kr.l2(1e-4))(act1)
    bn = kl.BatchNormalization()(conv)
    act = kl.Activation('relu')(bn)
    conv = kl.Conv2D(32, (3,3), padding='same', kernel_regularizer=kr.l2(1e-4))(act)
    bn = kl.BatchNormalization()(conv)
    # Skip layer addition
    skip = kl.add([act1,bn])
    act1 = kl.Activation('relu')(skip)
# Downsampling with strided convolution (32 -> 64 filters, spatial /2)
conv = kl.Conv2D(64, (3,3), padding='same', strides=2, kernel_regularizer=kr.l2(1e-4))(act1)
bn = kl.BatchNormalization()(conv)
act = kl.Activation('relu')(bn)
conv = kl.Conv2D(64, (3,3), padding='same', kernel_regularizer=kr.l2(1e-4))(act)
bn = kl.BatchNormalization()(conv)
# Downsampling with strided 1x1 convolution
act1_downsampled = kl.Conv2D(64,(1,1),padding='same',strides=2,kernel_regularizer=kr.l2(1e-4))(act1)
# Downsampling skip layer
skip_downsampled = kl.add([act1_downsampled,bn])
act1 = kl.Activation('relu')(skip_downsampled)
# Two more residual blocks at 64 filters; this final layer is denoted by a
# star in the figure above.
for _ in range(2):
    conv = kl.Conv2D(64, (3, 3), padding="same", kernel_regularizer=kr.l2(1e-4))(act1)
    bn = kl.BatchNormalization()(conv)
    act = kl.Activation('relu')(bn)
    conv = kl.Conv2D(64, (3,3), padding='same', kernel_regularizer=kr.l2(1e-4))(act)
    bn = kl.BatchNormalization()(conv)
    # Skip layer addition
    skip = kl.add([act1,bn])
    act1 = kl.Activation('relu')(skip)
# Bare expression: in a notebook cell this displays the tensor and its shape.
act1
# Which is an object of size 16x16x32, the correct size based on our chosen architecture (note the first question mark indicates an unknown number of input images: thus if we ran the model on a single photo, this would be a 1, if we ran it on the entire CIFAR training set at once it would be 50000). As before, we can use this model for classification by doing global average pooling, then the softmax function.
# In[16]:
# Classification head: global average pooling -> BN -> dense -> softmax.
gap = kl.GlobalAveragePooling2D()(act1)
bn = kl.BatchNormalization()(gap)
final_dense = kl.Dense(N)(bn)
softmax = kl.Activation('softmax')(final_dense)
# In[17]:
import keras.models as km
model = km.Model(inputs=inputs,outputs=softmax)
# Initiate the RMSprop optimizer (the original comment wrongly said "adam").
# NOTE(review): lowercase keras.optimizers.rmsprop is the legacy Keras API;
# newer versions use keras.optimizers.RMSprop -- confirm the installed version.
opt = keras.optimizers.rmsprop(lr=0.001,decay=1e-6)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=128,
          epochs=100,
          validation_data
          =(x_test, y_test),
          shuffle=True)
# While the code as is works, it is *not* the complete architecture given in the figure above. **Implement the remainder of the network, and train the model for 100 epochs.** The complete architecture has quite a few parameters, so you'll definitely want to use a GPU, i.e. run it on the cluster (reference the job script included in this repo).
#
# There are also a few extra tidbits to make this work better. First, we'll want to checkpoint the model, which is to say that we'll want to save the weights anytime the model improves during the training process. We can do this easily in Keras with a checkpoint function:
# In[ ]:
import keras.callbacks as kc
filepath = './checkpoints'
# Prepare callbacks for model saving and for learning rate adjustment.
# Save weights only when validation accuracy improves.
checkpoint = kc.ModelCheckpoint(filepath=filepath,
                                monitor='val_acc',
                                verbose=1,
                                save_best_only=True)
# Note that these weights can then be loaded into a model on your local machine for more convenient post-processing and visualization of results. We'll also want to reduce the learning rate as the model reaches an optimal solution. We can do this with a *learning rate schedule*.
#
# In[ ]:
def lr_schedule(epoch):
    """Piecewise-constant schedule: 1e-3, reduced by 1000x after epoch 60."""
    rate = 1e-3
    if epoch > 60:
        rate = rate * 1e-3
    # Log the rate chosen for this epoch (same output as the original).
    print('Learning rate: ', rate)
    return rate
# Wrap the schedule in a Keras callback applied at the start of each epoch.
lr_scheduler = kc.LearningRateScheduler(lr_schedule)
# We can include these two functions as *callbacks* to the optimizer:
# In[ ]:
# Retrain with checkpointing and the learning-rate schedule attached.
model.fit(x_train, y_train,
          batch_size=128,
          epochs=100,
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=[checkpoint,lr_scheduler])
# Once your model is fitted, **adapt your class activation mapping routine to run on this more advanced architecture, and compute a few examples? How do these activation maps differ from those computed for the smaller network?**
# In[ ]:
|
# -*- coding:utf-8 -*-
import pandas as pd
from sklearn.preprocessing import LabelEncoder, StandardScaler, Imputer
from sklearn.preprocessing import OneHotEncoder
# from nltk.corpus import stopwords
# import nltk
from sklearn.manifold import TSNE
from sklearn.decomposition import TruncatedSVD
import codecs
import numpy as np
import xgboost as xgb
from sklearn.cross_validation import train_test_split
import cPickle,re, os, json
from copy import deepcopy
import jieba
import sys , operator
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
reload(sys)
sys.setdefaultencoding('utf-8')
# ---- module-level lookup tables ----
# English job-class names; keep only the text before the first '('.
jobs_en = codecs.open('../data/pos_class.txt', 'rb').readlines()
jobs_en = [ job.strip().split("(")[0] for job in jobs_en]
# Chinese job titles (the whitelist used throughout this script).
jobs = codecs.open('../data/pos_name.txt', 'rb', encoding = 'utf-8').readlines()
jobs = [ job.strip() for job in jobs]
# Bidirectional title <-> integer-id maps.
jobs_dict = {}
reverse_dict = {}
jobi = 0
for job in jobs:
    jobs_dict[job] = jobi
    reverse_dict[jobi] = job
    jobi = jobi + 1
# Industry name -> integer id; key is the text before the first ':'.
indi = 0
ind_dict = {}
inds = codecs.open('ind_dict.txt', 'rb', encoding = 'utf-8').readlines()
for ind in inds:
    ind_dict[ind.split(':')[0]] = indi
    indi = indi + 1
# Position title -> rank, from 'title:rank' lines.
pos_dict = {}
poss = codecs.open('../data/pos_rank.txt', 'rb', encoding = 'utf-8').readlines()
for ind in poss:
    title, rank = ind.strip().split(":")
    pos_dict[title] = rank
# Weights of the four sub-tasks (degree/size/salary/position) used by
# rough_score() to estimate the overall score.
wa = 0.35
wb = 0.86
wc = 0.74
wd = 2.25
# model = read_vec("../data/glove.6B.100d.txt")
# print np.mean(np.array([model[part.lower()] for part in jobs_en[0].strip().split()]) , 0)
# jobvec = np.array([ np.mean(np.array([model[part.lower()] for part in job.strip().split() if part.lower() not in stopwords.words('english') ]), 0 ) for job in jobs_en])
# print jobvec.shape
def get_max_duration_length(x):
    """Stub feature extractor: accepts ``x``, ignores it, and always
    yields an empty list. Kept for interface compatibility."""
    return list()
def get_job_duration(x, i):
    """Return the length in months of work-experience record ``x[i]``.

    Dates are 'YYYY-MM' strings.  Each field is parsed defensively:
    a missing/malformed end date falls back to 2015-12, and a missing
    start date falls back to one year / one month before the end.

    Fix: the original used bare ``except:`` clauses, which also swallow
    SystemExit/KeyboardInterrupt; narrowed to the exceptions that date
    parsing can actually raise (None record, missing key, bad index,
    non-numeric text).
    """
    try:
        end_year = int(x[i]['end_date'][:4])
    except (TypeError, ValueError, KeyError, IndexError):
        end_year = 2015
    try:
        end_month = int(x[i]['end_date'][5:])
    except (TypeError, ValueError, KeyError, IndexError):
        end_month = 12
    try:
        start_year = int(x[i]['start_date'][:4])
    except (TypeError, ValueError, KeyError, IndexError):
        start_year = end_year - 1
    try:
        start_month = int(x[i]['start_date'][5:])
    except (TypeError, ValueError, KeyError, IndexError):
        start_month = end_month - 1
    return (end_year - start_year) * 12 + end_month - start_month
def get_interval(x, i, j):
    """Months elapsed from the start of record ``x[j]`` to the end of
    record ``x[i]`` ('YYYY-MM' date strings).

    A malformed end *year* falls back to 2015; any other parse failure
    makes the whole interval -1.

    Fix: the original used bare ``except:`` clauses; narrowed to the
    exceptions date parsing can actually raise.
    """
    try:
        end_year = int(x[i]['end_date'][:4])
    except (TypeError, ValueError, KeyError, IndexError):
        end_year = 2015
    try:
        end_month = int(x[i]['end_date'][5:])
        start_year = int(x[j]['start_date'][:4])
        start_month = int(x[j]['start_date'][5:])
        r = (end_year - start_year) * 12 + end_month - start_month
    except (TypeError, ValueError, KeyError, IndexError):
        r = -1
    return r
def work_total_month(x):
    """Total career span in months: from the start of ``x[-1]`` to the end
    of ``x[0]`` (the list appears to be newest-first — x[0] is called
    'last' and x[-1] 'first' elsewhere in this file).

    A malformed end *year* falls back to 2015; any other parse failure
    returns -1.

    Fix: bare ``except:`` clauses narrowed to the exceptions date
    parsing can actually raise.
    """
    try:
        end_year = int(x[0]['end_date'][:4])
    except (TypeError, ValueError, KeyError, IndexError):
        end_year = 2015
    try:
        end_month = int(x[0]['end_date'][5:])
        start_year = int(x[-1]['start_date'][:4])
        start_month = int(x[-1]['start_date'][5:])
        r = (end_year - start_year) * 12 + end_month - start_month
    except (TypeError, ValueError, KeyError, IndexError):
        r = -1
    return r
def num_unique_work(x):
    """Number of distinct position names over all work records except the
    one at index 1 (held out as the prediction target elsewhere in this
    file).  A record that is None contributes the name 'none'.
    """
    positions = set()
    for idx, record in enumerate(x):
        if idx == 1:
            continue
        positions.add('none' if record is None else record['position_name'])
    return len(positions)
def benchmark(train):
    """Print the accuracies of the naive copy-forward baseline.

    The target record is x[1] of each 'workExperienceList'; the baseline
    predicts its size from the following record (x[2]) and its salary and
    position from the most recent record (x[0]).  Finally prints a rough
    overall score using an assumed degree accuracy of 0.7.
    """
    pred_size = train['workExperienceList'].apply( lambda x : np.round((x[2]['size'] )) )
    acc_size = np.sum(pred_size == train['workExperienceList'].apply(lambda x: x[1]['size']))/1.0/len(train)
    print('benchmarking acc of size:' + str(acc_size) )
    pred_salary = train['workExperienceList'].apply( lambda x : np.round((x[0]['salary'] )) )
    acc_salary = np.sum(pred_salary == train['workExperienceList'].apply(lambda x: x[1]['salary']))/1.0/len(train)
    print('benchmarking acc of salary:' + str(acc_salary) )
    # Keep only rows whose target position is in the known-jobs whitelist;
    # out-of-list positions are scored against a fixed fallback title.
    train = train.loc[train['workExperienceList'].apply(lambda x: x[1]['position_name']).isin(jobs)]
    pred_pos = train['workExperienceList'].apply( lambda x : x[0]['position_name'] )
    acc_pos = np.sum(pred_pos == train['workExperienceList'].apply(lambda x: x[1]['position_name'] if x[1]['position_name'] in jobs else u"销售经理" ))/1.0/len(train)
    print('benchmarking acc of pos: ' + str(acc_pos) )
    rough_score(0.7, acc_size, acc_salary, acc_pos)
def rough_score(acc_deg, acc_size, acc_salary, acc_pos, weights=None):
    """Print and return the weighted average of the four sub-task accuracies.

    Parameters
    ----------
    acc_deg, acc_size, acc_salary, acc_pos : float
        Accuracy of the degree / company-size / salary / position models.
    weights : tuple of four floats, optional
        Per-task weights; defaults to the module-level (wa, wb, wc, wd).

    Improvements over the original (both backward compatible): the score
    is now returned (the original only printed it) and the weights can
    be overridden per call.
    """
    if weights is None:
        weights = (wa, wb, wc, wd)
    w_deg, w_size, w_salary, w_pos = weights
    total = w_deg + w_size + w_salary + w_pos
    score = (acc_deg * w_deg + acc_size * w_size + acc_salary * w_salary + acc_pos * w_pos) / total
    print('rough estimation of final score: ' + str(score))
    return score
def auto_transform(train, test, nameA):
    """Label-encode column ``nameA`` in place, fitting one encoder on the
    union of train and test values so both splits share the same integer
    mapping.  Returns the mutated (train, test) pair.
    """
    codec = LabelEncoder()
    codec.fit(list(train[nameA]) + list(test[nameA]))
    for frame in (train, test):
        frame[nameA] = codec.transform(frame[nameA])
    return train, test
def getFeatureTotal(train, test):
    """Label-encode the categorical columns shared by train and test.

    Each encoder is fitted on the union of the two splits so the integer
    codes agree across them.  Free-text department/industry columns are
    first reduced to their first jieba token ('none' when empty or None).
    Mutates and returns (train, test).
    """
    le = LabelEncoder()
    le.fit(list(test['last_pos']) + list(train['last_pos']))
    train['last_pos'] = le.transform(train['last_pos'])
    test['last_pos'] = le.transform(test['last_pos'])
    le = LabelEncoder()
    # Collapse free-text industry to its first segmented word before encoding.
    train['last_industry'] = train['last_industry'].apply(lambda x : " ".join(jieba.cut(x)).split()[0] if x is not None and len(" ".join(jieba.cut(x)).split()) > 0 else 'none')
    test['last_industry'] = test['last_industry'].apply(lambda x : " ".join(jieba.cut(x)).split()[0] if x is not None and len(" ".join(jieba.cut(x)).split()) > 0 else 'none')
    le.fit(list(train['last_industry']) + list(test['last_industry']))
    train['last_industry'] = le.transform(train['last_industry'])
    test['last_industry'] = le.transform(test['last_industry'])
    le = LabelEncoder()
    train['last_dep'] = train['last_dep'].apply(lambda x : " ".join(jieba.cut(x)).split()[0] if x is not None and len(" ".join(jieba.cut(x)).split()) > 0 else 'none')
    test['last_dep'] = test['last_dep'].apply(lambda x : " ".join(jieba.cut(x)).split()[0] if x is not None and len(" ".join(jieba.cut(x)).split()) > 0 else 'none')
    le.fit(list(train['last_dep']) + list(test['last_dep']))
    train['last_dep'] = le.transform(train['last_dep'])
    test['last_dep'] = le.transform(test['last_dep'])
    # NOTE(review): from here on the same `le` instance is re-fitted for
    # each remaining column instead of creating a fresh LabelEncoder;
    # refitting resets its classes_, so encoding is still per-column.
    le.fit(list(test['pre_pos']) + list(train['pre_pos']))
    train['pre_pos'] = le.transform(train['pre_pos'])
    test['pre_pos'] = le.transform(test['pre_pos'])
    train['pre_industry'] = train['pre_industry'].apply(lambda x : " ".join(jieba.cut(x)).split()[0] if x is not None and len(" ".join(jieba.cut(x)).split()) > 0 else 'none')
    test['pre_industry'] = test['pre_industry'].apply(lambda x : " ".join(jieba.cut(x)).split()[0] if x is not None and len(" ".join(jieba.cut(x)).split()) > 0 else 'none')
    le.fit(list(train['pre_industry']) + list(test['pre_industry']))
    train['pre_industry'] = le.transform(train['pre_industry'])
    test['pre_industry'] = le.transform(test['pre_industry'])
    train['pre_dep'] = train['pre_dep'].apply(lambda x : " ".join(jieba.cut(x)).split()[0] if x is not None and len(" ".join(jieba.cut(x)).split()) > 0 else 'none')
    test['pre_dep'] = test['pre_dep'].apply(lambda x : " ".join(jieba.cut(x)).split()[0] if x is not None and len(" ".join(jieba.cut(x)).split()) > 0 else 'none')
    le.fit(list(train['pre_dep']) + list(test['pre_dep']))
    train['pre_dep'] = le.transform(train['pre_dep'])
    test['pre_dep'] = le.transform(test['pre_dep'])
    # le.fit(list(test['first_pos']) + list(train['first_pos']))
    # train['first_pos'] = le.transform(train['first_pos'])
    # test['first_pos'] = le.transform(test['first_pos'])
    #train, test = auto_transform(train,test, 'first_industry')
    #train, test = auto_transform(train,test, 'first_dep')
    # train, test = auto_transform(train, test, 'pre2_pos')
    # train, test = auto_transform(train, test, 'pre2_industry')
    # train, test = auto_transform(train, test, 'pre2_dep')
    return train, test
def getSizeFeatures(train):
    """Extra features intended for the company-'size' model (most of the
    candidate features are commented out).  Adds department / industry /
    duration columns derived from 'workExperienceList'; mutates and
    returns ``train``.
    """
    # train['salary_ratio'] = train['last_salary']/1.0/train['pre_salary']
    # train['size_ratio'] = train['last_size']/1.0/train['pre_size']
    # train['salary_prod'] = train['last_salary'] * train['pre_salary']
    # train['size_prod'] = train['last_size'] * train['pre_size']
    # train['last_end_year'] = train['workExperienceList'].apply(lambda x : 2015 if not x[0]['end_date'][:4].startswith("20") else int(x[0]['end_date'][:4]) )
    # train['last_end_month'] = train['workExperienceList'].apply(lambda x : 7 if not x[0]['end_date'][:4].startswith("20") else int(x[0]['end_date'][5:]) )
    # train['last_ss_ratio'] = train['last_salary']/train['last_size']
    # train['last_ss_prod'] = train['last_salary'] * train['last_size']
    # train['pre_ss_ratio'] = train['pre_salary']/train['pre_size']
    # train['pre_ss_prod'] = train['pre_salary'] * train['pre_size']
    # train['start_work_year'] = train['workExperienceList'].apply(lambda x : 0 if x[len(x)-1]['start_date'] is None else int(x[len(x)-1]['start_date'].split('-')[0]) )
    # train['max_size'] = train['workExperienceList'].apply(lambda x : np.max([ 0 if x[i] is None else x[i]['size'] for i in range(len(x)) if i != 1]))
    # train['min_size'] = train['workExperienceList'].apply(lambda x : np.min([0 if x[i] is None else x[i]['size'] for i in range(len(x)) if i != 1]))
    # train['size_mm_ratio'] = train['max_size']/1.0/train['min_size']
    # train['pre_job_long'] = train['workExperienceList'].apply(lambda x : get_job_duration(x, 2))
    # train['pre_job_islong']= train['workExperienceList'].apply(lambda x : 1 if get_job_duration(x, 2) > 36 else 0)
    # train['pre_3tuple_prod'] = train['pre_job_long'] * train['pre_ss_prod']
    # train['2job_ratio'] = train['last_job_long']/1.0/train['pre_job_long']
    # train['last_3tuple_prod'] = train['last_job_long'] * train['last_ss_prod']
    train['last_dep'] = train['workExperienceList'].apply(lambda x : x[0]['department'] if x[0]['department'] is not None else 'none')
    # NOTE(review): the len(x)>=2 guard still indexes x[2], which needs
    # len(x) > 2 — confirm the input always has at least 3 records.
    train['pre_dep'] = train['workExperienceList'].apply(lambda x : x[2]['department'] if len(x)>=2 and x[2]['department'] is not None else 'none')
    # NOTE(review): duplicate assignment of 'last_dep' (already set above).
    train['last_dep'] = train['workExperienceList'].apply(lambda x : x[0]['department'] if x[0]['department'] is not None else 'none')
    train['first_dep'] = train['workExperienceList'].apply(lambda x : x[-1]['department'] if x[-1]['department'] is not None else 'none')
    train['last_industry'] = train['workExperienceList'].apply(lambda x : x[0]['industry'])
    train['pre_industry'] = train['workExperienceList'].apply(lambda x : x[2]['industry'] if x[2]['industry'] is not None else 'none')
    train['work_age'] = train['workExperienceList'].apply(lambda x :work_total_month(x))
    train['last_job_long'] = train['workExperienceList'].apply(lambda x : get_interval(x, 0, 0))
    train['pre_job_long'] = train['workExperienceList'].apply(lambda x : get_interval(x, 2, 2))
    train['first_job_long'] = train['workExperienceList'].apply(lambda x : get_interval(x, -1, -1))
    return train
def getFeature(train):
    """Extract per-row features from 'workExperienceList'.

    Record layout as used throughout this file: x[0] is the most recent
    ('last') job, x[1] the held-out target record, x[2] the one before
    it ('pre'), x[-1] the earliest ('first').  Mutates and returns
    ``train``.
    """
    train['last_dep'] = train['workExperienceList'].apply(lambda x : x[0]['department'] if x[0]['department'] is not None else 'none')
    train['last_salary'] = train['workExperienceList'].apply(lambda x : x[0]['salary'])
    train['last_size'] = train['workExperienceList'].apply(lambda x : x[0]['size'])
    train['last_industry'] = train['workExperienceList'].apply(lambda x : x[0]['industry'])
    train['last_pos'] = train['workExperienceList'].apply(lambda x : x[0]['position_name'])
    train['pre_pos'] = train['workExperienceList'].apply(lambda x : x[2]['position_name'] if x[2]['position_name'] is not None else 'none')
    # NOTE(review): the len(x)>=2 guard still indexes x[2], which needs
    # len(x) > 2 — confirm the input always has at least 3 records.
    train['pre_dep'] = train['workExperienceList'].apply(lambda x : x[2]['department'] if len(x)>=2 and x[2]['department'] is not None else 'none')
    train['pre_salary'] = train['workExperienceList'].apply(lambda x : x[2]['salary'] if x[2]['salary'] is not None else 1)
    train['pre_size'] = train['workExperienceList'].apply(lambda x : x[2]['size'] if x[2]['size'] is not None else 1)
    train['pre_industry'] = train['workExperienceList'].apply(lambda x : x[2]['industry'] if x[2]['industry'] is not None else 'none')
    # train['pre2_pos'] = train['workExperienceList'].apply(lambda x : x[3]['position_name'] if len(x)>3 and x[3]['position_name'] is not None else 'none')
    # train['pre2_dep'] = train['workExperienceList'].apply(lambda x : x[3]['department'] if len(x)>3 and x[3]['department'] is not None else 'none')
    # train['pre2_salary'] = train['workExperienceList'].apply(lambda x : x[3]['salary'] if len(x)>3 and x[3]['salary'] is not None else 1)
    # train['pre2_size'] = train['workExperienceList'].apply(lambda x : x[3]['size'] if len(x)>3 and x[3]['size'] is not None else 1)
    # train['pre2_industry'] = train['workExperienceList'].apply(lambda x : x[3]['industry'] if len(x)>3 and x[3]['industry'] is not None else 'none')
    #train['first_dep'] = train['workExperienceList'].apply(lambda x : x[-1]['department'] if x[-1]['department'] is not None else 'none')
    #train['first_pos'] = train['workExperienceList'].apply(lambda x : x[-1]['position_name'])
    #train['first_industry'] = train['workExperienceList'].apply(lambda x : x[-1]['industry'])
    #train['first_salary'] = train['workExperienceList'].apply(lambda x : x[-1]['salary'])
    #train['first_size'] = train['workExperienceList'].apply(lambda x : x[-1]['size'])
    train['work_age'] = train['workExperienceList'].apply(lambda x :work_total_month(x))
    train['last_job_long'] = train['workExperienceList'].apply(lambda x : get_interval(x, 0, 0))
    train['pre_job_long'] = train['workExperienceList'].apply(lambda x : get_interval(x, 2, 2))
    #train['first_job_long'] = train['workExperienceList'].apply(lambda x : get_interval(x, -1, -1))
    # Record 1 is the target, hence len(x)-1 actual jobs.
    train['num_times_work'] = train['workExperienceList'].apply(lambda x : len(x)-1)
    train['num_unique_work']= train['workExperienceList'].apply(lambda x : num_unique_work(x))
    # 'age' arrives as text; keep only its ASCII digits (0 when none).
    train['age'] = train['age'].apply(lambda x : 0 if len(x.encode('ascii','ignore')) == 0 else int(x.encode('ascii', 'ignore')))
    train['start_work_age'] = train['age'] - train['workExperienceList'].apply(lambda x : work_total_month(x)/12.0)
    return train
def createOneHotFeature(train, test, features):
    """One-hot encode each column named in ``features`` and stack the
    results horizontally into dense (n_rows, total_categories) arrays.

    Bug fix: the original called ``fit_transform`` independently on the
    train and the test column, so a category present in only one split
    produced matrices with different widths / column meanings.  Each
    encoder is now fitted once on the union of both splits and then
    applied to each split, guaranteeing identical layouts.

    Returns (train_oneh, test_oneh) as dense numpy arrays; an empty
    ``features`` list yields (n, 0)-shaped arrays (the original raised
    NameError in that case).
    """
    train_parts = []
    test_parts = []
    for feature in features:
        enc = OneHotEncoder()
        train_col = np.asarray(train[feature]).reshape(len(train), 1)
        test_col = np.asarray(test[feature]).reshape(len(test), 1)
        # Fit on the union so both splits share one category->column mapping.
        enc.fit(np.vstack([train_col, test_col]))
        train_parts.append(enc.transform(train_col).toarray())
        test_parts.append(enc.transform(test_col).toarray())
    if not train_parts:
        return np.empty((len(train), 0)), np.empty((len(test), 0))
    return np.hstack(train_parts), np.hstack(test_parts)
def getMultiLogFeatures(train, val, test, xlist, nameC):
    """Concatenate the log-odds feature sets of every column in ``xlist``
    against target ``nameC``, keeping train/val/test aligned.

    The first column's result forms the base frames; subsequent columns
    contribute only their newly appended feature columns.
    """
    base_width = len(train.columns)
    first = True
    for column in xlist:
        frames = getNewLogFeatures(train, val, test, column, nameC)
        if first:
            train_all, val_all, test_all = frames
            first = False
        else:
            extra = frames[0].columns[base_width:]
            train_all = pd.concat([train_all, frames[0][extra]], axis=1)
            val_all = pd.concat([val_all, frames[1][extra]], axis=1)
            test_all = pd.concat([test_all, frames[2][extra]], axis=1)
    return train_all, val_all, test_all
def TFIDFeature(train, val, test, nameA):
    """Build char-ngram TF-IDF matrices over the concatenated ``nameA``
    text of every work record except index 1 (the target record).

    The vectorizer is fitted on train only and applied to val/test.
    Rows whose records cannot be joined (TypeError) fall back to the
    literal text 'none'.  Returns sparse (train, val, test) matrices.
    """
    print('--- generating TFIDF features')
    train_desp_list = []
    test_desp_list = []
    val_desp_list = []
    print test.columns
    for lists in test['workExperienceList']:
        try:
            # NOTE(review): the pattern '(|)' matches only the empty string,
            # so this re.sub is a no-op; it was presumably meant to strip
            # parenthesis characters (e.g. r'[()]') — confirm intent.
            lines = ' '.join([ re.sub('(|)', '',lists[i][nameA]) for i in range(len(lists)) if i != 1 and lists[i][nameA] is not None ])
            test_desp_list.append(" ".join(jieba.cut(lines, cut_all=True)))
        except TypeError:
            test_desp_list.append('none')
            continue
    for lists in train['workExperienceList']:
        try:
            lines = ' '.join([ re.sub('(|)', '',lists[i][nameA]) for i in range(len(lists)) if i != 1 and lists[i][nameA] is not None])
            train_desp_list.append(" ".join(jieba.cut(lines, cut_all=True)))
        except TypeError:
            train_desp_list.append('none')
            continue
    for lists in val['workExperienceList']:
        try:
            lines = ' '.join([ re.sub('(|)', '',lists[i][nameA]) for i in range(len(lists)) if i != 1 and lists[i][nameA] is not None])
            val_desp_list.append(" ".join(jieba.cut(lines, cut_all=True)))
        except TypeError:
            val_desp_list.append('none')
            continue
    from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
    # Character 2-7-grams work on Chinese text without word segmentation.
    vect = TfidfVectorizer(max_features= 10000, strip_accents='unicode',
                           analyzer='char',sublinear_tf=1, ngram_range=(2, 7)
                           )
    train_desp_vec= vect.fit_transform(train_desp_list)
    test_desp_vec = vect.transform(test_desp_list)
    val_desp_vec = vect.transform(val_desp_list)
    return train_desp_vec, val_desp_vec, test_desp_vec
def read_vec(filename):
    """Load a whitespace-separated word-vector file into a dict.

    Each line is a token followed by its float components; the value is
    a float32 numpy array.  The file is opened in binary mode (as the
    original did), so keys are bytes under Python 3.

    Fixes: the file handle is now closed (original leaked it), and blank
    lines are skipped (original raised IndexError on them).
    """
    wordvecs = {}
    with open(filename, 'rb') as f:
        for line in f:
            parts = line.strip().split()
            if not parts:
                continue  # robustness: tolerate blank/whitespace-only lines
            wordvecs[parts[0]] = np.array(parts[1:], dtype=np.float32)
    return wordvecs
def parse_data(df,logodds,logoddsPA, NameA, NameC):
    """Expand categorical column ``NameA`` into log-odds feature columns.

    Parameters
    ----------
    df : DataFrame with a ``NameA`` column.
    logodds : dict value -> pd.Series of per-category log-odds; each
        Series becomes columns 'logodds<NameA><NameC>0..k-1'.
    logoddsPA : dict value -> scalar marginal log-odds; becomes the
        single column 'logodds<NameA><NameC>'.

    Returns a new DataFrame: all original columns plus the log-odds ones.

    Fix: the original joined ``address_features.ix[:,:]`` — the ``.ix``
    indexer was removed in pandas 1.0, so this crashed on any modern
    pandas; the slice was a no-op and is replaced by a direct join.
    ``.copy()`` is added so the column assignment never trips pandas'
    chained-assignment warning.
    """
    feature_list=df.columns.tolist()
    cleanData=df[feature_list].copy()
    cleanData.index=range(len(df))
    print("Creating A features")
    # One column per target category, taken from the per-value Series.
    address_features=cleanData[NameA].apply(lambda x: logodds[x])
    address_features.columns=["logodds"+ NameA + NameC + str(x) for x in range(len(address_features.columns))]
    # Scalar marginal log-odds of the NameA value itself.
    cleanData["logodds" + NameA + NameC ]=cleanData[NameA].apply(lambda x: logoddsPA[x])
    features = cleanData.join(address_features)
    return features
from sklearn.base import TransformerMixin
class DataFrameImputer(TransformerMixin):
    """Column-wise DataFrame imputer.

    Object-dtype columns are filled with their most frequent value;
    every other column is filled with its mean.
    """

    def __init__(self):
        """Stateless until fit() records the per-column fill values."""

    def fit(self, X, y=None):
        """Learn a fill value for every column of ``X``; returns self."""
        fill_values = []
        for col in X:
            if X[col].dtype == np.dtype('O'):
                # Mode for object/string columns.
                fill_values.append(X[col].value_counts().index[0])
            else:
                # Mean for numeric columns.
                fill_values.append(X[col].mean())
        self.fill = pd.Series(fill_values, index=X.columns)
        return self

    def transform(self, X, y=None):
        """Return ``X`` with NaNs replaced by the fills learned in fit()."""
        return X.fillna(self.fill)
def get_log_count(trainDF, NameA, NameC):
    """Compute log-odds statistics of categorical column ``NameA`` with
    respect to target column ``NameC``.

    Returns a 3-tuple:
      logodds        -- dict: NameA value -> pd.Series (integer-indexed)
                        of per-category log-odds; pairs rarer than
                        MIN_CAT_COUNTS keep the global default.
      logoddsPA      -- dict: NameA value -> log-odds of its marginal
                        frequency in trainDF.
      default_logodds -- the global per-category log-odds Series.
    """
    addresses=sorted(trainDF[NameA].unique())
    categories=sorted(trainDF[NameC].unique())
    C_counts=trainDF.groupby([NameC]).size()
    A_C_counts=trainDF.groupby([NameA, NameC]).size()
    A_counts=trainDF.groupby([NameA]).size()
    logodds={}
    logoddsPA={}
    # (NameA, NameC) pairs at or below this count keep the default log-odds.
    MIN_CAT_COUNTS=2
    default_logodds=np.log(C_counts/len(trainDF))- np.log(1.0-C_counts/float(len(trainDF)))
    for addr in addresses:
        PA=A_counts[addr]/float(len(trainDF))
        logoddsPA[addr]=np.log(PA)- np.log(1.-PA)
        logodds[addr]=deepcopy(default_logodds)
        for cat in A_C_counts[addr].keys():
            # Only override the default when the pair is common enough and
            # not degenerate (i.e. PA would be exactly 1).
            if (A_C_counts[addr][cat]>MIN_CAT_COUNTS) and A_C_counts[addr][cat]<A_counts[addr]:
                PA=A_C_counts[addr][cat]/float(A_counts[addr])
                logodds[addr][categories.index(cat)]=np.log(PA)-np.log(1.0-PA)
        logodds[addr]=pd.Series(logodds[addr])
        logodds[addr].index=range(len(categories))
    return logodds, logoddsPA, default_logodds
def generate_log_features(trainDF, testDF, logodds, logoddsPA, NameA, NameC, default_logodds):
    """Materialise log-odds features for both frames via parse_data().

    Mutates ``logodds``/``logoddsPA`` in place: NameA values seen only in
    testDF receive the default per-category log-odds, and values present
    in both frames get their marginal log-odds recomputed over the
    pooled row count.  Returns (train_features, test_features).
    """
    addresses=sorted(trainDF[NameA].unique())
    A_counts=trainDF.groupby([NameA]).size()
    categories=sorted(trainDF[NameC].unique())
    features = parse_data(trainDF,logodds,logoddsPA, NameA, NameC)
    collist=features.columns.tolist()[2:]
    # scaler = StandardScaler()
    # scaler.fit(features[collist])
    # features[collist]=scaler.transform(features[collist])
    new_addresses=sorted(testDF[NameA].unique())
    new_A_counts=testDF.groupby(NameA).size()
    only_new=set(new_addresses+addresses)-set(addresses)
    only_old=set(new_addresses+addresses)-set(new_addresses)
    in_both=set(new_addresses).intersection(addresses)
    # Values unseen in train: default category log-odds, pooled marginal.
    for addr in only_new:
        PA=new_A_counts[addr]/float(len(testDF)+len(trainDF))
        logoddsPA[addr]=np.log(PA)- np.log(1.-PA)
        logodds[addr]=deepcopy(default_logodds)
        logodds[addr].index=range(len(categories))
    # Values in both frames: refresh the marginal over the pooled counts.
    for addr in in_both:
        PA=(A_counts[addr]+new_A_counts[addr])/float(len(testDF)+len(trainDF))
        logoddsPA[addr]=np.log(PA)-np.log(1.-PA)
    features_sub =parse_data(testDF,logodds,logoddsPA, NameA, NameC)
    # scaler.fit(features_test)
    #collist=features_sub.columns.tolist()[1:]
    #features_sub[collist]=scaler.transform(features_sub[collist])
    return features, features_sub
def getDataMatrix(vNumber, new_dep, mat, model):
    """Fill row i of ``mat`` with the mean embedding of the English words
    found in ``new_dep[vNumber[i]]``; mutates ``mat`` in place and also
    returns it.

    NOTE(review): depends on a module-level ``dim`` (not defined in this
    file's visible portion) and on nltk's ``stopwords``, whose import is
    commented out at the top of the file — calling this as-is would raise
    NameError; confirm before use.  Rows with no in-vocabulary words, or
    whose input cannot be joined (TypeError), become zero vectors.
    """
    for i in range(len(vNumber)):
        ind = vNumber[i]
        try :
            words = ' '.join(new_dep[ind])
            # Keep ASCII letters only, lower-case, drop English stopwords.
            words = re.sub('[^a-zA-Z]+', ' ', words)
            words = [ w.lower() for w in words.split() if w not in stopwords.words('english') ]
            # NOTE(review): `vec` is assigned but never used.
            vec = [0] * dim
            count = 0
            for w in words:
                if w in model :
                    count = count + 1
                    mat[i,:] = mat[i,:] + model[w]
            if count == 0 :
                mat[i,:] = np.array([0] * dim)
            else:
                mat[i,:] = mat[i,:]/count
        except TypeError:
            mat[i,:] = np.array([0] * dim)
    return mat
def load_data():
    """Read the train/test json-lines files into DataFrames and apply the
    shared preprocessing: reduce 'major' to its first jieba token,
    label-encode 'major' and 'gender', drop '_id', and fill NaNs with 0.
    Returns (train, test).
    """
    train_list = []
    for line in open('../data/train_clean.json', 'r'):
        train_list.append(json.loads(line))
    train = pd.DataFrame(train_list)
    #train_work = train[names[-1]]
    test_list = []
    for line in open('../data/test_clean.json', 'r'):
        test_list.append(json.loads(line))
    test = pd.DataFrame(test_list)
    print('--- NLP on major, simply cut the first word')
    le = LabelEncoder()
    print len(set(train['major']))
    train['major'] = train['major'].apply(lambda x : " ".join(jieba.cut(x, cut_all = False)).split()[0] if x is not None and len(" ".join(jieba.cut(x)).split()) > 0 else 'none')
    test['major'] = test['major'].apply(lambda x : " ".join(jieba.cut(x, cut_all = False)).split()[0] if x is not None and len(" ".join(jieba.cut(x)).split()) > 0 else 'none')
    print len(set(train['major']))
    # One encoder fitted on both splits keeps the 'major' codes aligned.
    le.fit(list(train['major']) + list(test['major']))
    train['major'] = le.transform(train['major'])
    test['major'] = le.transform(test['major'])
    le = LabelEncoder()
    # NOTE(review): 'gender' is encoded with encoders fitted separately on
    # train and test; codes agree only if both splits contain the same
    # gender values — confirm.
    train['gender'] = le.fit_transform(train['gender'])
    names = train.columns
    le = LabelEncoder()
    test['gender'] = le.fit_transform(test['gender'])
    del train['_id']
    del test['_id']
    train = train.fillna(0)
    test = test.fillna(0)
    #test['age'] = test['age'].apply(lambda x : int(x.replace(u'岁','').encode('ascii')))
    return train, test
def getPosDict():
    """Load 'pos_dict.txt' ('chinese:english' per line) into a dict.

    Original quirk (preserved): a malformed line — one that does not
    split into exactly two ':'-separated parts — re-maps the *previous*
    line's key to 'manager'.

    Fixes: the file handle is now closed via a context manager (the
    original leaked it), and a malformed *first* line no longer raises
    NameError on the unbound key (it is simply skipped).
    """
    pos = dict()
    ch = None
    with codecs.open('pos_dict.txt', 'rb', encoding='utf-8') as posdes:
        for line in posdes:
            try:
                ch, en = line.strip().split(":")
                pos[ch] = en
            except ValueError:
                # NOTE(review): original behaviour — a malformed line
                # downgrades the most recent key to 'manager'.
                if ch is not None:
                    pos[ch] = 'manager'
    #print ch, pos[ch]
    return pos
def getMostSimilar(x):
    '''
    Map a Chinese position_name ``x`` to the closest of the 32 canonical
    position names by comparing mean word vectors via dot product.

    NOTE(review): depends on module-level ``model``, ``jobvec`` and
    ``stopwords``, whose creation is commented out near the top of this
    file — calling this as-is would raise NameError; confirm before use.
    Unknown titles (KeyError) or encoding failures fall back to the
    default title u'销售经理'.
    '''
    pos = getPosDict()
    try:
        en_x = pos[x]
        # Strip non-ASCII so only English tokens are embedded.
        titles = en_x.decode('utf-8').encode('ascii', 'ignore')
        xvec = np.mean(np.array([ model[s.lower()] if s.lower() in model and s.lower() not in stopwords.words('english') else [0]* 100 for s in titles.strip().split() ]), 0)
        dots = [np.dot(xvec, jobvec[i, :]) for i in range(32)]
        #print dots, len(dots)
        index = np.argmax(np.array(dots))
        newx = jobs[index]
        print x, newx
    except KeyError:
        newx = u'销售经理'
    except UnicodeEncodeError:
        newx = u'销售经理'
    return newx
def preprocess_degree():
    """Train an XGBoost multi-class model for the 'degree' target and
    write its test-set predictions to result/degree_pre.csv.

    Reuses pickled datasets when 'datasets_degree.pkl' exists; otherwise
    rebuilds all features (label encodings plus seven sets of log-odds
    features) from the raw json and trains from scratch.
    """
    if os.path.isfile('datasets_degree.pkl'):
        # Cached path: load previously pickled matrices (order matters).
        with open('datasets_degree.pkl', 'rb') as fp:
            newtrain = cPickle.load(fp)
            train_Y = cPickle.load(fp)
            le = cPickle.load(fp)
            newtest = cPickle.load(fp)
    else :
        print('--- reading input files')
        # First `offset` rows train, the remainder validate.
        offset = 60000
        train, test = load_data()
        print('--- fill NaN')
        train = train.fillna(train.median())
        test = test.fillna(test.median())
        # train = DataFrameImputer().fit_transform(train)
        # #val = DataFrameImputer().fit_transform(val)
        # test = DataFrameImputer().fit_transform(test)
        names = train.columns
        print('--- Create Major Features')
        train = getFeature(train)
        test = getFeature(test)
        train, test = getFeatureTotal(train, test)
        # train['rank_pos'] = train['last_pos'].apply(lambda x : pos_dict[x])
        # test['rank_pos'] = test['last_pos'].apply(lambda x : pos_dict[x])
        # enc = OneHotEncoder()
        # #enc.fit(np.array(train['rank_pos']))
        # train_rank_pos = enc.fit_transform(np.array(train['rank_pos'].reshape(70000,1)))
        # test_rank_pos = enc.fit_transform(np.array(test['rank_pos'].reshape(20000,1)))
        #onehfeatures = ['rank_pos', 'last_salary', 'last_size', 'work_age', 'num_times_work', 'max_salary' ]
        #train_oneh , test_oneh = createOneHotFeature(train, test, onehfeatures)
        print train.columns
        train_degree = train['degree']
        train, val , train_y, val_y= train[:offset], train[offset:], train_degree[:offset], train_degree[offset:]
        train = train.drop(['id'], 1)
        val = val.drop(['id'], 1)
        test = test.drop(['id'], 1)
        # train = DataFrameImputer().fit_transform(train)
        # val = DataFrameImputer().fit_transform(val)
        # test = DataFrameImputer().fit_transform(test)
        print('generate log features of degree')
        # Log-odds of seven predictor columns against the 'degree' target.
        train1, val1, test1 = getNewLogFeatures(train, val, test, "major", "degree")
        train2, val2, test2 = getNewLogFeatures(train, val, test, "last_salary", "degree")
        train3, val3, test3 = getNewLogFeatures(train, val, test, "last_pos", "degree")
        train4, val4, test4 = getNewLogFeatures(train, val, test, "age", "degree")
        train5, val5, test5 = getNewLogFeatures(train, val, test, "last_industry", "degree")
        train6, val6, test6 = getNewLogFeatures(train, val, test, "pre_pos", "degree")
        train7, val7, test7 = getNewLogFeatures(train, val, test, "pre_dep", "degree")
        # Columns beyond the original width are the newly added features.
        num_features = len(train.columns)
        keep_list2 = train2.columns[num_features:]
        keep_list3 = train3.columns[num_features:]
        keep_list4 = train4.columns[num_features:]
        keep_list5 = train5.columns[num_features:]
        keep_list6 = train6.columns[num_features:]
        keep_list7 = train7.columns[num_features:]
        train = pd.concat([train1, train2[keep_list2], train3[keep_list3], train4[keep_list4], train5[keep_list5], train6[keep_list6],train7[keep_list7] ], axis =1 )
        val = pd.concat([val1, val2[keep_list2], val3[keep_list3], val4[keep_list4], val5[keep_list5], val6[keep_list6], val7[keep_list7] ], axis =1 )
        test = pd.concat([test1, test2[keep_list2], test3[keep_list3], test4[keep_list4], test5[keep_list5], test6[keep_list6], test7[keep_list7] ], axis =1 )
        train = train.drop('degree',1)
        val = val.drop('degree',1)
        test = test.drop('degree',1)
        # NOTE(review): the TF-IDF matrices below are computed but never
        # stacked into the model input (the hstack lines are commented out).
        train_tfidf, val_tfidf , test_tfidf = TFIDFeature(train, val, test, 'industry')
        train = train.drop(['workExperienceList'], 1)
        val = val.drop(['workExperienceList'], 1)
        test = test.drop(['workExperienceList'], 1)
        # NOTE(review): create_feature_map / plot_importance are defined
        # elsewhere in this file (not in the visible portion).
        create_feature_map(train)
        #train_oneh, val_oneh = train_oneh[:offset,:], train_oneh[offset:,:]
        # train = np.hstack([np.array(train), train_tfidf.toarray(), train_oneh])
        # val = np.hstack([np.array(val) , val_tfidf.toarray(), val_oneh])
        # test= np.hstack([np.array(test) , test_tfidf.toarray(), test_oneh])
        train = np.array(train)
        val = np.array(val)
        test = np.array(test)
        xgtrain = xgb.DMatrix(train, label = train_y)
        xgval = xgb.DMatrix(val, label = val_y)
        xgtest = xgb.DMatrix(test)
        watchlist = [(xgtrain, 'train'),(xgval, 'val')]
        params = {}
        params["objective"] = 'multi:softmax'
        params["eta"] = 0.1
        params["subsample"] = 0.7
        params["colsample_bytree"] = 0.8
        params["silent"] = 1
        params["max_depth"] = 8
        params["min_child_weight"] = 4
        params["gamma"] = 1
        params["num_class"] = 3
        params["eval_metric"] = 'merror'
        model = xgb.train(list(params.items()), xgtrain, 500, watchlist, early_stopping_rounds= 10)
        plot_importance(model, 'degree_feature_important_xgb.png')
        pred = model.predict(xgtest)
        sub = pd.read_csv('result/benchmark.csv')
        sub['degree'] = pd.Series([int(x) for x in pred])
        sub.to_csv('result/degree_pre.csv' ,encoding="utf-8", index=False)
        return pred
def preprocess_size():
    """Produce the 'size' column of the submission file.

    The entire modelling pipeline is commented out; the current behaviour
    simply copies each test row's previous-job size (x[2]) into
    result/merge2.csv based on result/merge.csv.
    """
    # NOTE(review): RFE is imported but unused by the active code path.
    from sklearn.feature_selection import RFE
    if os.path.isfile('datasets_degree.pkl'):
        with open('datasets_degree.pkl', 'rb') as fp:
            newtrain = cPickle.load(fp)
            train_Y = cPickle.load(fp)
            le = cPickle.load(fp)
            newtest = cPickle.load(fp)
    else :
        print('--- reading input files')
        offset = 60000
        train, test = load_data()
        # train = getFeature(train)
        # test = getFeature(test)
        # train ,test = getFeatureTotal(train, test)
        # print('--- add size specfic features...')
        # # train = getSizeFeatures(train)
        # # test = getSizeFeatures(test)
        # train['size'] = train['workExperienceList'].apply(lambda x : x[1]['size']) - 1
        # train,val,train_y, val_y = train[:offset], train[offset:], train['size'][:offset], train['size'][offset:]
        # train = train.drop(['id'], 1)
        # val = val.drop(['id'], 1)
        # test = test.drop(['id'], 1)
        # #featuresList = ['last_pos', 'last_job_long', 'last_size', 'last_salary', 'pre_pos', 'last_dep', 'pre_dep']
        # featuresList = ['last_pos', 'pre_dep' ]
        # train, val, test = getMultiLogFeatures(train, val, test, featuresList, "size")
        # train = train.drop('size',1)
        # val = val.drop('size',1)
        # #train_tfidf, val_tfidf , test_tfidf = TFIDFeature(train, val, test, 'industry')
        # train = train.drop(['workExperienceList'], 1)
        # val = val.drop(['workExperienceList'], 1)
        # test = test.drop(['workExperienceList'], 1)
        # create_feature_map(train)
        # # train = np.hstack([np.array(train), train_tfidf.toarray()])
        # # val = np.hstack([np.array(val) , val_tfidf.toarray()])
        # # test= np.hstack([np.array(test) , test_tfidf.toarray()])
        # train = np.array(train)
        # val = np.array(val)
        # test = np.array(test)
        # xgtrain = xgb.DMatrix(train, label = train_y)
        # xgval = xgb.DMatrix(val, label = val_y)
        # xgtest = xgb.DMatrix(test)
        # watchlist = [(xgtrain, 'train'),(xgval, 'val')]
        # params = {}
        # params["objective"] = 'multi:softmax'
        # params["eta"] = 0.1
        # params["subsample"] = 1
        # params["colsample_bytree"] = 0.9
        # params["silent"] = 1
        # params["max_depth"] = 8
        # params["min_child_weight"] = 4
        # params["gamma"] = 1
        # params["num_class"] = 7
        # params["eval_metric"] = 'merror'
        # model = xgb.train(list(params.items()), xgtrain, 1000, watchlist, early_stopping_rounds= 30)
        # plot_importance(model, 'size_feature_important_xgb.png')
        # pred = model.predict(xgtest) + 1
        # # sle = StandardScaler()
        # # sle.fit(train)
        # # train = sle.transform
        sub = pd.read_csv('result/merge.csv')
        #sub['size'] = pd.Series([int(x) for x in pred])
        # Baseline: reuse the size of the job preceding the target record.
        sub['size'] = test['workExperienceList'].apply(lambda x : x[2]['size'])
        sub.to_csv('result/merge2.csv' , index=False)
def getNewLogFeatures(train, val, test, nameA, nameC):
    """Fit log-odds statistics of ``nameA`` versus target ``nameC`` on the
    train split, then materialise the features for all three splits.

    Note: generate_log_features mutates the log-odds dicts with values
    seen only in its second frame, so the two calls must stay in this
    order (val first, then test); the train features returned come from
    the second call.
    """
    logodds, logoddsPA, default_logodds = get_log_count(train, nameA, nameC)
    train_feats, val_feats = generate_log_features(train, val, logodds, logoddsPA, nameA, nameC, default_logodds)
    train_feats, test_feats = generate_log_features(train, test, logodds, logoddsPA, nameA, nameC, default_logodds)
    #return train1[len_train:], val1[len_train:], test1[len_train-1:]
    return train_feats, val_feats, test_feats
def preprocess_salary():
    """Train an XGBoost multi-class model for the target record's salary
    band and write predictions to result/salary_pred.csv.
    """
    print('--- reading input files')
    # First `offset` rows train, the remainder validate.
    offset =25000
    train, test = load_data()
    print('--- fill NaN')
    train = train.fillna(-1)
    test = test.fillna(-1)
    # train = DataFrameImputer().fit_transform(train)
    # #val = DataFrameImputer().fit_transform(val)
    # test = DataFrameImputer().fit_transform(test)
    names = train.columns
    print('--- Create Major Features')
    train = getFeature(train)
    test = getFeature(test)
    train,test = getFeatureTotal(train, test)
    # train = getSizeFeatures(train)
    # test = getSizeFeatures(test)
    # Target: the salary of the held-out record x[1].
    train['salary'] = train['workExperienceList'].apply(lambda x : x[1]['salary'])
    # train['position_name'] = train['workExperienceList'].apply(lambda x : x[1]['position_name'])
    # #---------------------- filtering out the non-32 positions
    # train = train.loc[train['position_name'].isin(jobs)]
    # train = train.drop(['position_name'],1)
    train, val , train_y, val_y = train[:offset], train[offset:], train['salary'][:offset], train['salary'][offset:]
    train = train.drop(['id'], 1)
    val = val.drop(['id'], 1)
    test = test.drop(['id'], 1)
    print('generate log features of position_name')
    #feature_list = ['last_salary', 'pre_salary']
    #train, val, test = getMultiLogFeatures(train, val, test, feature_list, 'salary')
    train = train.drop('salary',1)
    val= val.drop('salary',1)
    print train.columns
    #train_tfidf, val_tfidf, test_tfidf = TFIDFeature(train, val, test, 'last_pos')
    train = train.drop(['workExperienceList'], 1)
    val = val.drop(['workExperienceList'], 1)
    test = test.drop(['workExperienceList'], 1)
    # NOTE(review): create_feature_map / plot_importance are defined
    # elsewhere in this file (not in the visible portion).
    create_feature_map(train)
    train = np.array(train)
    val = np.array(val)
    test = np.array(test)
    xgtrain = xgb.DMatrix(train, label = train_y)
    xgval = xgb.DMatrix(val, label = val_y)
    xgtest = xgb.DMatrix(test)
    watchlist = [(xgtrain, 'train'),(xgval, 'val')]
    params = {}
    params["objective"] = 'multi:softmax'
    params["eta"] = 0.001
    params["subsample"] = 0.7
    params["colsample_bytree"] = 1
    params["silent"] = 1
    params["max_depth"] = 10
    params["min_child_weight"] = 100
    params["gamma"] = 2
    params["num_class"] = 7
    params["eval_metric"] = 'merror'
    model = xgb.train(list(params.items()), xgtrain, 3000, watchlist, early_stopping_rounds= 40)
    pred = model.predict(xgtest)
    plot_importance(model, 'salary_feature_important_xgb.png')
    sub = pd.read_csv('result/benchmark.csv')
    sub['salary'] = pd.Series([int(x) for x in pred])
    sub.to_csv('result/salary_pred.csv' , index=False)
def preprocess_pos():
    """Train an XGBoost multi-class model predicting the latest position name.

    Builds features from raw data, label-encodes the 32 allowed positions,
    trains with a train/validation split at row 26000, writes the feature
    map and importance plot, and saves predictions to result/submit_xgb.csv.

    Returns:
        list[int]: predicted position-class indices for the test set.

    NOTE(review): the cached branch loads pickled datasets but the variables
    are never used afterwards and `pred` is only produced in the else branch —
    confirm the cached path is actually supported.
    """
    if os.path.isfile('datasets_degree.pkl'):
        # Cached path: reuse previously engineered datasets and label encoder.
        with open('datasets_degree.pkl', 'rb') as fp:
            newtrain = cPickle.load(fp)
            train_Y = cPickle.load(fp)
            le = cPickle.load(fp)
            newtest = cPickle.load(fp)
    else :
        print('--- reading input files')
        offset = 26000  # train/validation split point (row index)
        train, test = load_data()
        #---------------------- filtering out the non-32 positions
        train['position_name'] = train['workExperienceList'].apply(lambda x : x[1]['position_name'])
        train = train.loc[train['position_name'].isin(jobs)]
        names = train.columns
        print('--- Create Major Features')
        train = getFeature(train)
        test = getFeature(test)
        train, test = getFeatureTotal(train, test)
        train['salary'] = train['workExperienceList'].apply(lambda x : x[1]['salary'])
        # train['rank_pos'] = train['last_pos'].apply(lambda x : pos_dict[x])
        # test['rank_pos'] = test['last_pos'].apply(lambda x : pos_dict[x])
        # onehfeatures = ['work_age', 'last_salary', 'last_size' ]
        # train_oneh , test_oneh = createOneHotFeature(train, test, onehfeatures)
        # Encode the target (position_name) as integer classes for XGBoost.
        le = LabelEncoder()
        train['position_name'] = le.fit_transform(train['position_name'])
        train, val , train_y, val_y = train[:offset], train[offset:], train['position_name'][:offset], train['position_name'][offset:]
        #train_tfidf, val_tfidf, test_tfidf = TFIDFeature(train, val, test, 'industry')
        print('generate log features of position_name')
        feature_list = [ 'last_salary', 'last_pos', 'pre_pos' , 'last_size']
        train, val, test = getMultiLogFeatures(train, val, test, feature_list, 'position_name')
        # train, val, test = getMultiLogFeatures(train, val, test, feature_list, 'degree')
        # train, val, test = getMultiLogFeatures(train, val, test, feature_list, 'salary')
        # Drop identifier/target columns so they do not leak into the features.
        train = train.drop(['id', 'salary'], 1)
        val = val.drop(['id', 'salary'], 1)
        test = test.drop(['id'], 1)
        #print('add TruncatedSVD and TSNE features..')
        train = train.drop('position_name',1)
        val = val.drop('position_name',1)
        #train_tfidf, val_tfidf, test_tfidf = TFIDFeature(train, val, test, 'position_name')
        # svd = TruncatedSVD(n_components=10, random_state=42)
        # train_svd = svd.fit_transform(train_tfidf.toarray())
        # val_svd= svd.fit_transform(val_tfidf.toarray())
        # test_svd =svd.fit_transform(test_tfidf.toarray())
        # tsne = TSNE(n_components=3, random_state=0)
        # train_tsne = tsne.fit_transform(train_svd)
        # val_tsne = tsne.fit_transform(val_svd)
        # test_tsne = tsne.fit_transform(test_svd)
        # print train_tsne.shape
        train = train.drop(['workExperienceList'], 1)
        val = val.drop(['workExperienceList'], 1)
        test = test.drop(['workExperienceList'], 1)
        create_feature_map(train)
        #train_oneh, val_oneh = train_oneh[:offset,:], train_oneh[offset:,:]
        train = np.hstack([np.array(train)])
        val = np.hstack([np.array(val) ])
        test= np.hstack([np.array(test) ])
        # train = np.hstack([np.array(train), train_tfidf.toarray()])
        # val = np.hstack([np.array(val), val_tfidf.toarray()])
        # test = np.hstack([np.array(test), test_tfidf.toarray()])
        print train.shape
        xgtrain = xgb.DMatrix(train, label = train_y)
        xgval = xgb.DMatrix(val, label = val_y)
        xgtest = xgb.DMatrix(test)
        watchlist = [(xgtrain, 'train'),(xgval, 'val')]
        # 32-class softmax with early stopping on validation merror.
        params = {}
        params["objective"] = 'multi:softmax'
        params["eta"] = 0.1
        params["subsample"] = 0.6
        params["colsample_bytree"] = 0.75
        params["silent"] = 1
        params["max_depth"] = 8
        params["min_child_weight"] = 5
        params["gamma"] = 1
        params["num_class"] = 32
        params["eval_metric"] = 'merror'
        model = xgb.train(list(params.items()), xgtrain, 800, watchlist, early_stopping_rounds= 30)
        pred = model.predict(xgtest)
        #pred = np.argmax(pred, axis = 1)
        plot_importance(model, 'position_feature_important_xgb.png')
        # Map integer classes back to position-name strings for the submission.
        pred = [int(x) for x in pred]
        sub = pd.read_csv('result/benchmark.csv')
        sub['position_name'] = le.inverse_transform(pred)#pd.Series([reverse_dict[x] for x in pred])
        sub.to_csv('result/submit_xgb.csv' , index=False)
    return pred
def plot_importance(model, fn):
    """Save a horizontal bar chart of normalized XGBoost feature importances.

    model: trained xgboost Booster (feature map read from 'xgb.fmap').
    fn: output image file name.
    """
    raw_scores = model.get_fscore(fmap='xgb.fmap')
    ranked = sorted(raw_scores.items(), key=lambda item: item[1])
    frame = pd.DataFrame(ranked, columns=['feature', 'fscore'])
    # Normalize so the scores sum to 1 (relative importance).
    frame['fscore'] = frame['fscore'] / frame['fscore'].sum()
    plt.figure()
    frame.plot()
    frame.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(10, 6))
    # axes = plt.Axes(figure, [.2,.1,.7,.8]) # [left, bottom, width, height] where each value is between 0 and 1
    # figure.add_axes(axes)
    plt.title('XGBoost Feature Importance')
    plt.xlabel('relative importance')
    plt.gcf().savefig(fn)
def create_feature_map(train):
    """Write the first 30 column names of *train* to 'xgb.fmap'.

    The file uses XGBoost's fmap format: '<index>\t<name>\tq' per line,
    where 'q' marks the feature as quantitative.
    """
    features = list(train.columns[:30])
    # Context manager guarantees the file is closed even on error
    # (the original leaked the handle on exceptions); enumerate replaces
    # the manual counter.
    with open('xgb.fmap', 'w') as outfile:
        for i, feat in enumerate(features):
            outfile.write('{0}\t{1}\tq\n'.format(i, feat))
def plot_ss_dis():
    """Plot a histogram of the combined company-size/salary code.

    The code is size * 10 + salary, so each bin in 0..78 is one
    (size, salary) pair from the latest work experience entry.
    """
    train, test = load_data()
    train['size'] =train['workExperienceList'].apply(lambda x : x[1]['size'])
    train['salary'] = train['workExperienceList'].apply(lambda x : x[1]['salary'])
    train['size_salary'] = train['workExperienceList'].apply(lambda x : x[1]['size'] * 10 + x[1]['salary'])
    plt.hist(train['size_salary'], bins=np.arange(0,79))
    #plt.scatter(train['salary'], train['size'])
    plt.show()
# Entry point: only the salary model runs; the other pipeline stages are
# kept commented out for manual selection.
#train, test = load_data()
#benchmark(train)
#preprocess_degree()
#preprocess_size()
#preprocess_size_salary()
#pred = preprocess_binary_salary()
#preprocess_multi_salary(pred)
preprocess_salary()
#preprocess_pos()
#plot_ss_dis()
from django.apps import AppConfig


class TastingsConfig(AppConfig):
    """Django app configuration for the 'tastings' app."""
    # 64-bit auto primary keys for models that don't set one explicitly.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'tastings'
|
try:
from Tkinter import *
except ImportError:
from tkinter import *
class QuantityFrame(Frame):
    """Frame with a 'New Quantity' entry and an update button.

    Both widgets start disabled; change_state() enables them once the
    root's collection has data loaded.
    """
    def __init__(self, root):
        Frame.__init__(self, root)
        self.grid(row=1, column=2, padx=10, pady=10, sticky=N)
        self.newq_label = Label(self, text = 'New Quantity:')
        self.newq_label.grid(row=0, column=0, sticky=NW)
        self.new_quant = IntVar()
        self.quantity_entry = Entry(self, textvariable=self.new_quant, state=DISABLED)
        # BUG FIX: option was misspelled 'stick=NW'; 'sticky' is the real
        # grid option name.
        self.quantity_entry.grid(row=0, column=1, sticky=NW)
        self.new_quant.set(0)
        self.update_button = Button(self, text = 'Update Quantity', command=root.update_quantity, state=DISABLED)
        self.update_button.grid(row=0, column=2, sticky=NE)

    def change_state(self, root):
        """Enable the entry/button when collection data exists, else disable."""
        # 'is None' is the correct identity test (was '== None').
        if root.collection.data is None:
            self.quantity_entry.config(state=DISABLED)
            self.update_button.config(state=DISABLED)
        else:
            self.quantity_entry.config(state=NORMAL)
            self.update_button.config(state=NORMAL)
|
"""
у вас есть список элементов [1, 2, 3, 4, 5, 6, 7, 8]. Перебрать список используя foreach цыкл.
Элемент с нечетным индексом поместить в новый список кортежей где первый элемент это индекс а второй это значение. [(index, value)].
соответственно элементы с четным индексом поместить в другой список кортежей с тем же форматом что и в случае с нечетными индексами.
"""
numbers = [1, 2, 3, 4, 5, 6, 7, 8]
even_index = []
odd_index = []
# for digit in numbers: #for each loop
# if numbers.index(digit) % 2 == 0:
# even_index.append((numbers.index(digit), digit))
# else:
# odd_index.append((numbers.index(digit), digit))
#
# print(f'Elements on even indexes are: {even_index}')
# print(f'Elements on odd indexes are: {odd_index}')
for index in range(len(numbers)): #for loop
if index % 2 == 0:
even_index.append((index, numbers[index]))
else:
odd_index.append((index, numbers[index]))
print(f'Elements on even indexes are: {even_index}')
print(f'Elements on odd indexes are: {odd_index}')
|
from django.shortcuts import render
from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime
import requests
import os
import platform
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
def call_url(url):
    """GET *url* and print the HTTP status code (scheduled-job body)."""
    # '{0}'.format(url) was a no-op for the string URLs this receives.
    r = requests.get(url)
    print('status code', r.status_code)
@api_view(['GET'])
def schedule_url(request):
    """Schedule a one-shot GET to ?url= at ?datetime= ('%d/%m/%y %H:%M:%S').

    NOTE(review): a new BackgroundScheduler is created, started and never
    shut down on every request — confirm this is intended; a single
    module-level scheduler is the usual pattern.
    """
    url = request.GET['url']
    stamp = request.GET['datetime']
    stamp = datetime.strptime(stamp, '%d/%m/%y %H:%M:%S')
    scheduler = BackgroundScheduler()
    # 'cron' with every field pinned to the timestamp fires exactly once.
    scheduler.add_job(call_url, 'cron', args=[url], second = stamp.second, minute = stamp.minute, hour = stamp.hour, day = stamp.day, month = stamp.month, year = stamp.year)
    scheduler.start()
    return Response({'message': 'Task Scheduled Successfully!'}, status = status.HTTP_200_OK)
@api_view(['GET'])
def ping_status(request):
    """Ping ?host= once and report 'OK' or 'Network Error'.

    SECURITY FIX: the previous os.system() call concatenated the raw,
    user-supplied host into a shell command string, allowing shell
    injection. subprocess.run with an argument list (shell=False) passes
    the host as a single argv entry instead.
    """
    import subprocess  # local import keeps this fix self-contained

    # Windows ping uses -n for count, Unix uses -c.
    param = '-n' if platform.system().lower() == 'windows' else '-c'
    completed = subprocess.run(["ping", param, "1", request.GET['host']])
    if completed.returncode == 0:
        pingstatus = "OK"
    else:
        pingstatus = "Network Error"
    data = {
        'status': pingstatus
    }
    return Response(data, status = status.HTTP_200_OK)
|
from responses.models import SurveyResponse
from responses.serializers import SurveyResponseSerializer, SurveyResponseAgeDepressionSerializer
from django.http import Http404
from rest_framework.views import APIView
from django.views import generic
from rest_framework import generics
from rest_framework.response import Response
from rest_framework import status
from django_filters.rest_framework import DjangoFilterBackend
class SurveyResponseList(generics.ListAPIView):
    """Read-only list endpoint for survey responses.

    Serializes the age/depression projection of each response and supports
    query-parameter filtering on the fields below.
    """
    queryset = SurveyResponse.objects.all()
    serializer_class = SurveyResponseAgeDepressionSerializer
    filter_backends = [DjangoFilterBackend]
    filterset_fields = ['race', 'gender', 'marital_status', 'vaccine_opinion']
|
import grequests

urls = ['http://www.heroku.com','http://www.hackerschool.com','http://www.bbc.com']

def do_something(response, **kwargs):
    # Response hook: print each completed response body (Python 2 print).
    print response.text

def req(urls):
    """Fire all GETs concurrently via grequests; the hook runs per response."""
    rs = (grequests.get(u, hooks = {'response':do_something}) for u in urls)
    x = grequests.map(rs)
    return x

# Demo: fetch all URLs and print the list of response objects.
print req(urls)
"""
TableEntry é uma classe que possui os seguintes campos:
- lexema
- tipo
- ponteiro para o valor
- num da linha
"""
class SymbolTable(object):
    """Maps a lexeme to its table entry (lexeme, type, value pointer, line)."""

    def __init__(self):
        # lexeme -> entry mapping
        self.symbolTable = {}

    def insertEntry(self, lexema, entry):
        """Insert or overwrite the entry stored under *lexema*."""
        self.symbolTable[lexema] = entry

    def getEntry(self, lexema):
        """Return the entry for *lexema* (raises KeyError when absent)."""
        return self.symbolTable[lexema]
from redis import Redis

# NOTE(review): host/port/db are hard-coded; move to configuration before
# sharing this file.
rd = Redis('119.3.170.97', port=6379, db=3, decode_responses=True)

if __name__ == '__main__':
    # Smoke check: list every key in db 3.
    print(rd.keys('*'))
#!/usr/bin/env python3
# coding=utf-8
import json
import os
import sys
import time
class Context():
    """Attribute-style namespace: c.key = value / c.key / 'key' in c."""

    def __getattr__(self,name):
        # Called only when normal lookup fails; translate the dict miss
        # into AttributeError so hasattr()/getattr() behave correctly
        # (previously a raw KeyError leaked out, breaking hasattr).
        try:
            return self.__dict__[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self,name,value):
        self.__dict__[name] = value

    def get(self,name,default=None):
        """Return the stored value for *name*, or *default* when missing."""
        try:
            return self.__getattr__(name)
        except AttributeError:
            return default

    def __str__(self):
        return str(self.__dict__)

    def __contains__(self,name):
        # BUG FIX: method was misspelled '__contain__', so the 'in'
        # operator never used it.
        return name in self.__dict__.keys()
# HACK: pre-register Context instances under module names so other code can
# do 'import context' / 'import common.context' and receive a shared
# namespace object instead of a real module.
sys.modules['context'] = Context()
sys.modules['common.context'] = Context()
#print(sys.modules)

# Quick self-test of attribute-style access.
c=Context()
c.testKey="test a"
print(c.testKey)
# Print a right triangle of '*' with the requested number of rows.
a = int(input("Enter no of rows:"))
myList = []
# BUG FIX: range(a + 1) started at 0 and produced an empty first row;
# rows 1..a each get i stars.
for i in range(1, a + 1):
    myList.append("*" * i)
    print("", i)  # progress echo of the current row number
print("\n".join(myList))
# Simulate yearly growth of two populations and report after how many
# years population A reaches or passes population B.
populationGrowthA = int(
    input("Digite a ordem de habitantes da população do país A: "))
populationGrowthB = int(
    input("Digite a ordem de habitantes da população do país B: "))
annualGrowthRateA = float(
    input("Informe a taxa anual de crescimento da população do país A: "))
annualGrowthRateA = annualGrowthRateA / 100  # percent -> fraction
annualGrowthRateB = float(
    input("Informe a taxa anual de crescimento da população do país B: "))
annualGrowthRateB = annualGrowthRateB / 100  # percent -> fraction
countYearPopulationGrowthA = 0
countYearPopulationGrowthB = 0
analysisPeriod = 100
# BUG FIX: analysisPeriod was defined but never used, so equal growth
# rates (or B growing faster) looped forever; cap the simulation at
# analysisPeriod years.
while (populationGrowthB >= populationGrowthA
       and countYearPopulationGrowthA < analysisPeriod):
    populationGrowthA = populationGrowthA + \
        (populationGrowthA * annualGrowthRateA)
    populationGrowthB = populationGrowthB + \
        (populationGrowthB * annualGrowthRateB)
    countYearPopulationGrowthA += 1
    countYearPopulationGrowthB += 1
print("A partir de {} ano(s) a população A é maior ou igual que a população B".format(
    countYearPopulationGrowthA))
|
import pymongo
import pandas
import bs4 as bs
import urllib.request
import re
from socket import error as SocketError
import errno
import pandas as pd
import requests
from datetime import datetime
# Default Unix-epoch bounds. NOTE(review): apparently unused — the function
# below shadows both names with locals; confirm before removing.
period1 = 319579200
period2 = 1505145600
def get_historical_price(stock_id, start_date, end_date):
    """Download daily price history for a HK-listed stock from Yahoo Finance.

    stock_id: numeric code as a string, e.g. '0151' (suffixed with '.HK').
    start_date / end_date: 'YYYY-MM-DD' strings (local time).

    Returns:
        list[list[str]]: the CSV rows split on commas; the first row is the
        header, and a trailing empty row may appear if the payload ends
        with a newline.
    """
    headers = {
        'User-Agent': 'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1; 125LA; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022)',
        'Connection': 'Keep-Alive',
        'Content-Type': 'text/plain; Charset=UTF-8',
        'Accept': '*/*',
        'Accept-Language': 'zh-cn',
        'Cookie': 'B=4aos411brv8q6&b=3&s=a7; PRF=t%3D0' + str(stock_id) + '.HK; bnpt=1501232783&pnid=&pnct=',
    }
    # BUG FIX: strftime('%s') is a platform-specific glibc extension that
    # fails on Windows; datetime.timestamp() gives the same local-time
    # epoch seconds portably.
    period1 = int(datetime.strptime(start_date, '%Y-%m-%d').timestamp())
    period2 = int(datetime.strptime(end_date, '%Y-%m-%d').timestamp())
    url = 'https://query1.finance.yahoo.com/v7/finance/download/' + stock_id + '.HK?period1=' + str(period1) + '&period2=' + str(period2) + '&interval=1d&events=history&crumb=tgIGmTK3EA.'
    data = requests.get((url),headers=headers).text
    price_list = []
    data2 = data.split('\n')
    print(data2)
    for a in data2:
        price_list.append(a.split(','))
    return price_list
# Demo run: ~3 weeks of daily prices for HK-listed stock 0151.
print(get_historical_price('0151', '2015-01-01', '2015-01-20'))
|
#!/usr/bin/env python
#coding:utf8
from . import editor
from models import NodeUtils, LinkUtils, GraphUtils
from flask import render_template, request, json, jsonify
from analysis.views import calculateCommunities
import sys
# Force UTF-8 default encoding so Chinese text round-trips (Python 2 hack).
reload(sys)
sys.setdefaultencoding("utf-8")

# Shared data-access helpers for nodes, relationships and whole graphs.
nodeUtils = NodeUtils()
linkUtils = LinkUtils()
graphUtils = GraphUtils()
# Render the editor page for a project.
@editor.route('/',methods=['GET','POST'])
def getEditor(projectId):
    """Recompute communities for the project, then render the editor UI.

    projectId comes from the blueprint's URL prefix (not this route rule).
    """
    calculateCommunities(projectId)
    communities = GraphUtils.countCommunityPeoples(projectId)
    return render_template('editor_pages/index.html', navId = "editor", projectId=projectId, communities = communities)
# Modify a node or a relationship.
@editor.route('/modify', methods=['POST'])
def modify(projectId):
    """Dispatch a node or link edit, based on the 'type' form field.

    Expects form fields: type ('node'|'link'), node/link (JSON payload),
    act (action name understood by the dispatch helpers).
    """
    if request.method == 'POST':
        if request.values.get('type', "") == 'node':
            nodeStr = request.values.get('node', "")  # raw node payload from the client
            actionStr = request.values.get('act', "")
            newNode = json.loads(nodeStr, encoding="utf-8")
            return nodeUtils.dispacthNode(projectId, newNode, actionStr)  # apply the node change
        elif request.values.get('type', "") == 'link':
            linkStr = request.values.get('link', "")
            actionStr = request.values.get('act', "")
            newLink = json.loads(linkStr, encoding="utf-8")  # parse the link JSON for processing
            return linkUtils.dispacthLink(projectId, newLink, actionStr)  # apply the link change
# Add one property to a set of nodes.
@editor.route('/addproperty', methods=['POST'])
def addNodeProperty(projectId):
    """Attach property_name=property_value to every node in the 'nodes' list.

    Skips the update entirely when property_name is empty; always returns
    an empty body.
    """
    if request.method == 'POST':
        nodesStr = request.values.get('nodes', "")
        property_name = request.values.get('property_name', "")
        property_value = request.values.get('property_value', "")
        all_nodes = json.loads(nodesStr, encoding="utf-8")
        if property_name != "":
            for node_obj in all_nodes:
                nodeUtils.addProperty(node_obj=node_obj, property_name=property_name, property_value=property_value)
        return ''  # client only needs success; no payload
# Delete the whole graph of a project.
@editor.route('/delete_graph', methods=['POST'])
def deleteAllGraph(projectId):
    """Remove every link, then every node, of the project's graph."""
    if request.method == 'POST':
        # Links first: they reference nodes.
        LinkUtils.deleteAllLinks(projectId)
        NodeUtils.deleteAllNodes(projectId)
        return ''
# Return the entire graph of a project (used on page refresh).
@editor.route('/graph', methods=['GET'])
def get_graph(projectId):
    """Return all nodes and edges of the project as JSON for the client."""
    nodes = NodeUtils.getAllNodes(projectId)
    edges = LinkUtils.getAllLinks(projectId)
    return jsonify(elements = {"nodes": nodes, "edges": edges})  # cytoscape-style payload
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-30 15:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename model School_view to School_beautification (no data change)."""

    dependencies = [
        ('catalogues', '0009_auto_20170830_1705'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='School_view',
            new_name='School_beautification',
        ),
    ]
|
Using Git
Git is a collaboration tool that is universally used by programmers
Below are the commands in git we will be using the most:
git init - initialize a connection between that folder and git
git add - stage your changes so they are included in the next commit
git commit -m "message here" - add a message to this instance
git pull origin branch_name - pull down the updates/changes from a repository
git push origin branch_name - push up your changes to a repository
git checkout -b branch_name - change to a different branch on your local machine, or create a new branch on your local machine
git status - tells you what changes are in this folder and what branch you are on
git clone - makes a copy of a targeted repo in a specified folder of your choice
Work in repos is organized with branches
Mutable vs. Immutable
Mutable = Can be changed / mutated
Lists
Dictionaries
Immutable = Cannot be changed or mutated
Strings
Tuples
Inheritance and Scope
Think about the acronym LEGB
Local
Enclosing
Global
Built In
|
import json
import unittest
import responses
import pyyoutube
class ApiMembersTest(unittest.TestCase):
    """Tests for the members / membershipsLevels YouTube API wrappers.

    Fixture payloads are loaded once at class-definition time and served
    through mocked HTTP (the `responses` library), so no network is used.
    """
    BASE_PATH = "testdata/apidata/members/"
    MEMBERS_URL = "https://www.googleapis.com/youtube/v3/members"
    MEMBERSHIP_LEVEL_URL = "https://www.googleapis.com/youtube/v3/membershipsLevels"

    # Canned API responses (read when the class body executes).
    with open(BASE_PATH + "members_data.json", "rb") as f:
        MEMBERS_RES = json.loads(f.read().decode("utf-8"))
    with open(BASE_PATH + "membership_levels.json", "rb") as f:
        MEMBERSHIP_LEVEL_RES = json.loads(f.read().decode("utf-8"))

    def setUp(self) -> None:
        # Token value is irrelevant: every request is intercepted by the mock.
        self.api = pyyoutube.Api(access_token="Authorize token")

    def testGetMembers(self) -> None:
        """get_members returns parsed models, or raw dicts with return_json."""
        with responses.RequestsMock() as m:
            m.add("GET", self.MEMBERS_URL, json=self.MEMBERS_RES)
            members = self.api.get_members(parts=["snippet"])
            self.assertEqual(members.kind, "youtube#memberListResponse")
            self.assertEqual(len(members.items), 2)
            members_json = self.api.get_members(
                page_token="token",
                count=None,
                has_access_to_level="high",
                filter_by_member_channel_id="id",
                return_json=True,
            )
            self.assertEqual(len(members_json["items"]), 2)

    def testGetMembershipLevels(self) -> None:
        """get_membership_levels returns parsed models or raw dicts."""
        with responses.RequestsMock() as m:
            m.add("GET", self.MEMBERSHIP_LEVEL_URL, json=self.MEMBERSHIP_LEVEL_RES)
            membership_levels = self.api.get_membership_levels(parts=["id", "snippet"])
            self.assertEqual(
                membership_levels.kind, "youtube#membershipsLevelListResponse"
            )
            self.assertEqual(len(membership_levels.items), 2)
            membership_levels_json = self.api.get_membership_levels(return_json=True)
            self.assertEqual(len(membership_levels_json["items"]), 2)
|
from collections import Counter


def stray(arr):
    """Return the element of *arr* that differs from all the others.

    BUG FIX: the previous XOR-reduce version (a) raised NameError on
    Python 3 because `reduce` was never imported, and (b) gives a wrong
    answer whenever the majority element occurs an odd number of times
    (e.g. [3, 3, 3, 5] -> 6). Counting occurrences and returning the
    least common element is correct for any input with one stray value.
    """
    return Counter(arr).most_common()[-1][0]
|
from django.db import models
from django.contrib.auth import get_user_model
from gdstorage.storage import GoogleDriveStorage
gd_storage = GoogleDriveStorage()
class Run(models.Model):
    """One submitted image-processing run (input image + chosen method)."""
    id = models.AutoField(primary_key=True)
    # The user may be deleted later; keep the run (SET_NULL), so
    # submitting_user can be None.
    submitting_user = models.ForeignKey(get_user_model(), on_delete=models.SET_NULL, null=True, blank=True)
    custom_name = models.CharField(max_length=50, blank=True, null=True)
    time_ran = models.DateTimeField(auto_now_add=True, blank=True, null=True)
    input_image = models.ImageField(null=False, blank=False, upload_to="input_image", storage=gd_storage)
    method = models.IntegerField(null=False, blank=False)
    successful = models.BooleanField(null=False, blank=False, default=False)

    def __str__(self):
        # BUG FIX: custom_name is nullable, so the old '!= ""' test let
        # None through and rendered the literal string 'None'; truthiness
        # covers both None and "". Also guard the nullable FK: the old
        # code raised AttributeError once the user was deleted.
        user = self.submitting_user.username if self.submitting_user else "unknown"
        if self.custom_name:
            return "{} ({} - {})".format(self.custom_name, self.id, user)
        else:
            return "Run {} ({} - {})".format(self.id, self.id, user)

    def display_str(self):
        """Short human-readable name for templates."""
        if self.custom_name:
            return self.custom_name
        else:
            return "Run {}".format(self.id)
class HarrisCorners(models.Model):
    """Harris-corner detection output image attached to a Run."""
    run = models.ForeignKey(Run, on_delete=models.CASCADE)
    output_image = models.ImageField(null=False, blank=False, upload_to="HarrisCorners/output_image", storage=gd_storage)

    def __str__(self):
        return "Run {} - Harris Corners".format(self.run.id)
|
from django.contrib import admin
from polls import models

# Expose the poll models in the Django admin with default ModelAdmin options.
admin.site.register(models.Vote)
admin.site.register(models.VoteOption)
|
# coding:utf-8
def script(s, player=None):
    """Quest step: have the robot announce the planting instructions.

    s: objective/state object (must expose .completed).
    player: active player; required — aborts with an error message if None.
    """
    from NaoQuest.objective import Objective
    from NaoSensor.plant import Plant
    import NaoCreator.SGBDDialogue.creer as bd
    import NaoCreator.Tool.speech_move as sm
    if not player:
        print("Error in execution of post_script \"testobj1_post\": player is None")
        return
    if not s.completed:
        # Fetch instruction dialogue (ids 34/35/36) and speak it.
        p1 = bd.Creer(bd.Instruction, bd.DicoVide, 34, 35, 36)
        sm.speech_and_move(p1.out())
        # Announce the plant's planting requirements (French runtime string kept verbatim).
        sm.speech_and_move(u"Pour cette plante, il faut : {}".format(player.current_quest.plante_lie.get_data(Plant.PLANTATION)["planter"]))
|
# Number guessing game (Spanish prompts kept verbatim).
# NOTE(review): numero_a_adivinar is 11 while the prompt asks for 1-10, so
# the game cannot be won — confirm whether that is the intended joke.
numero_del_usuario = int(input("deci un numero del 1 al 10 ameo"))
numero_a_adivinar = 11
wea = 1  # retry counter; loop allows retries #1 through #4
while numero_del_usuario != numero_a_adivinar and wea != 5:
    numero_del_usuario = int(input("trata devuelta intento #" + str(wea)))
    wea = wea + 1
if numero_a_adivinar == numero_del_usuario:
    print("le pegaste ameo")
if wea == 5:
    print("sos terrible pelotudo")
from django.shortcuts import render
# Create your views here.
# Controller functions: each renders a static template.
def index (request):
    """Render the landing page."""
    return render(request, 'index.html')

def products(request):
    """Render the products page."""
    return render(request, 'products.html')
from typing import Dict, List, Tuple

import boto3
import pandas as pd
import toml
# small trick (hack) so that imports work for both pytest and aws lambda
try:
from .helpers import gen_checkup_id, get_filename, \
RULE_SUMMARY, get_date_as_string
except ImportError:
from helpers import gen_checkup_id, get_filename, \
RULE_SUMMARY, get_date_as_string
try:
from .data_ops import run_query, prepare_data, exclude_dates, exclude_days
except ImportError:
from data_ops import run_query, prepare_data, exclude_dates, exclude_days
def start_checkup(config_string: str):
    """Run one full checkup: parse config, pull data, apply rules, upload.

    config_string: TOML document with [general] (title, output bucket and
    region), [datasource] (query, database, date range) and [rules]
    sections.
    """
    checkup_id = gen_checkup_id()
    print('************ starting checkup #{} ************'
          .format(checkup_id))
    print('************ reading config ************')
    print(config_string)
    config = toml.loads(config_string)
    print('************ config read complete ************')
    print(config)
    query = config['datasource']['query']
    start = get_date_as_string(config['datasource']['start_date'])
    end = get_date_as_string(config['datasource']['end_date'])
    # The query template embeds the date range.
    query = query.format(start_date=start, end_date=end)
    print(start)
    print(end)
    db = config['datasource']['database']
    # BUG FIX: the '{}' placeholder was never substituted — print() received
    # the literal template and the query as two separate arguments.
    print('running query: {}'.format(query))
    data = run_query(query, db)
    print('query done, data size: {} rows'.format(len(data.index)))
    prepared_data = prepare_data(start, end, data)
    print('data prep done, data size {} rows'.format(len(data.index)))
    results, summary = run_rules(config, prepared_data)
    output_loc = config['general']['output_location']
    output_reg = config['general']['output_region']
    checkup_name = config['general']['title']
    output_results(results, summary, output_loc,
                   output_reg, checkup_id, checkup_name)
def output_results(results: pd.DataFrame, summary: List[str], location: str,
                   region: str, checkup_id: str, checkup_name: str):
    """Upload the checkup results (CSV) and summary (Markdown) to S3.

    location: S3 bucket name; region: AWS region of the bucket.
    File names embed the checkup id and name via get_filename().
    """
    s3 = boto3.resource(
        's3',
        region_name=region)
    results_file = get_filename(checkup_id,
                                'dr-sven_results_' + checkup_name, '.csv')
    summary_file = get_filename(checkup_id,
                                'dr-sven_summary_' + checkup_name, '.md')
    # One Markdown document from the per-rule summary fragments.
    concat_summary = ''.join(summary)
    results_csv = results.to_csv()
    s3.Object(location, results_file).put(Body=results_csv)
    s3.Object(location, summary_file).put(Body=concat_summary)
def run_rules(config: Dict, data: pd.DataFrame):
    """Apply every configured min_records rule to *data*.

    Returns:
        (results, final_summary): concatenated failing rows from all rules,
        and one summary string per rule.
    """
    final_summary: List[str] = []
    frames: List[pd.DataFrame] = []
    rules = config['rules']['min_records']
    for rule in rules:
        result, summary = check_rule(data, rule)
        print('************ Got result ************')
        final_summary.append(summary)
        frames.append(result)
    # Concatenate once at the end instead of per iteration — pd.concat in a
    # loop recopies the accumulated frame every time (quadratic).
    results: pd.DataFrame = pd.concat(frames) if frames else pd.DataFrame()
    print('************ All rules processed ************')
    print(results)
    return results, final_summary
def check_rule(raw: pd.DataFrame, rule: Dict) -> Tuple[pd.DataFrame, str]:
    """Filters a dataframe on count field where count is less than
    min records. Adds symptom and failed rule information to dataframe.

    Returns:
        (filtered, summary): the failing rows annotated with 'symptom' and
        'failed_rule', plus a formatted RULE_SUMMARY string.
        (Annotation fixed: the function returns a tuple, not a bare frame.)
    """
    rule_name = rule['name']
    ignore_dates = rule['ignore_dates']
    ignore_days = rule['ignore_days']
    explain = rule['explanation']
    min_records = rule['min_records']
    # Pre-bake min_records; the remaining '{}' gets each row's count below.
    rule_text = 'Expected at least {} records but found {}'
    rule_text = rule_text.format(min_records, '{}')
    filtered = exclude_dates(raw, ignore_dates)
    filtered = exclude_days(filtered, ignore_days)
    ignored_count = len(raw.index) - len(filtered.index)
    # .copy() so the column assignments below modify an owned frame rather
    # than a view (avoids pandas SettingWithCopyWarning).
    filtered = filtered[filtered['count'] < min_records].copy()
    filtered['symptom'] = filtered['count'].map(rule_text.format)
    filtered['failed_rule'] = rule_name
    passed_count = len(raw.index) - len(filtered.index) - ignored_count
    failed_count = len(filtered.index)
    summary = RULE_SUMMARY.format(name=rule_name, description=explain,
                                  total=len(raw.index), ignored=ignored_count,
                                  passed=passed_count, failed=failed_count)
    print('************ Rule complete ************')
    return filtered, summary
|
from common.run_method import RunMethod
import allure
@allure.step("极客数学帮(家长APP)/用户行课班帖/获取未读班贴数量")
def classfeedback_student_unread_note_get(params=None, header=None, return_json=True, **kwargs):
    '''Get the number of unread class notes (parent app).

    :param params: query-string parameters appended to the URL
    :param header: request headers
    :param return_json: return the parsed JSON response when True (default)
    :param kwargs: extra request options (e.g. target host/environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户行课班帖/获取未读班贴数量"
    url = f"/service-profile/classfeedback/student/unread/note"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极客数学帮(家长APP)/用户行课班帖/获取学生班贴列表")
def classfeedback_student_notes_get(params=None, header=None, return_json=True, **kwargs):
    '''Get the list of class notes for a student (parent app).

    :param params: query-string parameters appended to the URL
    :param header: request headers
    :param return_json: return the parsed JSON response when True (default)
    :param kwargs: extra request options (e.g. target host/environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户行课班帖/获取学生班贴列表"
    url = f"/service-profile/classfeedback/student/notes"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极客数学帮(家长APP)/用户行课班帖/获取学生班贴详情")
def classfeedback_student_note_get(params=None, header=None, return_json=True, **kwargs):
    '''Get the detail of one class note for a student (parent app).

    :param params: query-string parameters appended to the URL
    :param header: request headers
    :param return_json: return the parsed JSON response when True (default)
    :param kwargs: extra request options (e.g. target host/environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户行课班帖/获取学生班贴详情"
    url = f"/service-profile/classfeedback/student/note"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
@allure.step("极客数学帮(家长APP)/用户行课班帖/获取学生具体某堂课的班帖详情")
def classfeedback_classId_class_studentId_student_get(classId, studentId, params=None, header=None, return_json=True, **kwargs):
    '''Get the class-note detail for one student in one specific class.

    :param classId: class identifier (path parameter)
    :param studentId: student identifier (path parameter)
    :param params: query-string parameters appended to the URL
    :param header: request headers
    :param return_json: return the parsed JSON response when True (default)
    :param kwargs: extra request options (e.g. target host/environment)
    :return: JSON response by default; raw response when return_json=False
    '''
    name = "极客数学帮(家长APP)/用户行课班帖/获取学生具体某堂课的班帖详情"
    url = f"/service-profile/classfeedback/{classId}/class/{studentId}/student"
    res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
    return res
|
from mod_base import *
class DelAccount(Command):
    """Permanently delete an existing account.
    Usage: delaccount username
    """
    def run(self, win, user, data, caller=None):
        # Validate arguments, confirm the account exists, then remove it.
        args = self.args
        if args.Empty():
            win.Send("Please provide account to delete.")
            return False
        # Account names are case-insensitive; normalize before lookup.
        account_name = args[0].lower()
        if not self.bot.config.AccountExists(account_name):
            win.Send("That account doesn't exist.")
            return False
        self.bot.config.RemoveAccount(account_name)
        win.Send("Account deleted!")
        return True
# Module registration: command class, type, required privilege level, and
# the IRC zone(s) in which the command is available.
module = {
    "class": DelAccount,
    "type": MOD_COMMAND,
    "level": 5,
    "zone": IRC_ZONE_BOTH,
}
from rest_framework import serializers
from .models import Deputy, PoliticalParty
class DeputySerializer(serializers.ModelSerializer):
    """Serializes a Deputy with party, declaration and income fields."""
    class Meta:
        fields = (
            'id',
            'name',
            'party_name',
            'declaration_id',
            'workplace',
            'incomes',
        )
        model = Deputy
class PoliticalPartySerializer(serializers.ModelSerializer):
    """Serializes a PoliticalParty (name and description only)."""
    class Meta:
        fields = (
            'party_name',
            'description',
        )
        model = PoliticalParty
'''
Calendar module demo.
'''
import calendar
# Text calendar for a given month of a given year.
print(calendar.month(2019,5))
# Text calendar for a whole year.
# print(calendar.calendar(2018))
# True when the year is a leap year, otherwise False.
print(calendar.isleap(2008))
# (weekday of the 1st (0-6), number of days) for the given month.
print(calendar.monthrange(2019,5))
# The month as a list of weeks; each week is a list of day numbers (0 = padding).
print(calendar.monthcalendar(2019,5))
|
'''
Move an object with the arrow keys; each key press sets the movement
direction, and the object keeps drifting along the last-used axis.
by Ching-Shoei Chiang
'''
import random, pygame, sys
from pygame.locals import *

pygame.init()
FPS = 30  # frames per second setting
fpsClock = pygame.time.Clock()
# set up the window
screen = pygame.display.set_mode((800, 800), 0, 32)
pygame.display.set_caption('object moving')
WHITE = (255, 255, 255)
circleImg = pygame.image.load('circle64.png')
circlex = circley = 400      # current position of the image
dirx = diry = 1              # last movement direction per axis (+1 / -1)
step = 10                    # pixels moved per key press / per frame drift
updown = True                # True: drifting vertically; False: horizontally
while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        if event.type == KEYDOWN:
            if event.key==K_ESCAPE:
                pygame.quit()
                sys.exit()
            # Arrow keys nudge the image and select the drift axis/direction.
            if event.key==K_UP:
                diry = -1
                circley = circley + diry*step
                updown = True
            elif event.key==K_DOWN:
                diry = +1
                circley = circley + diry*step
                updown = True
            elif event.key==K_LEFT:
                dirx = -1
                circlex = circlex + dirx*step
                updown = False
            elif event.key==K_RIGHT:
                dirx = 1
                circlex = circlex + dirx*step
                updown = False
    # Keep drifting along the active axis while inside the 736-px bound
    # (800 minus the 64-px image size).
    if updown and (circley<736 and circley>0):
        circley = circley + diry*step
    elif not updown and (circlex<736 and circlex>0):
        circlex = circlex + dirx*step
    # BUG FIX: the original blitted the image, then immediately filled the
    # whole screen white, erasing that blit — the first blit was wasted work.
    screen.fill(WHITE)
    screen.blit(circleImg, (circlex, circley))
    pygame.display.update()
    fpsClock.tick(FPS)
|
""" Init file. """
from .base import BaseController #pylint: disable=import-error
from .horizon import HorizonController
from .faults import FaultController
from .interpolator import Interpolator
from .enhancer import Enhancer
from .extender import Extender
from .extractor import Extractor
from .best_practices import * #pylint: disable=wildcard-import
from .utils import * #pylint: disable=wildcard-import
|
import pyowm
import time
import datetime
from datetime import datetime
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
#rpi pin config
RST = 24
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0
# 128x32 display with hardware SPI:
disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))
# Alternatively you can specify a software SPI implementation by providing
# digital GPIO pin numbers for all the required display pins. For example
# on a Raspberry Pi with the 128x32 display you might use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)
now = datetime.now()
# One-shot weather fetch for fixed coordinates.
# NOTE(review): API key is hard-coded; move it to config/env before publishing.
owm = pyowm.OWM(API_key='a2e0525a3ab947074b37ba6c33f11288')
obs = owm.weather_at_coords(44.1, -77.58)
wea = obs.get_weather()
temp = wea.get_temperature(unit='celsius')
wind = wea.get_wind()
status = wea.get_status()
dstatus = wea.get_detailed_status()
#the two following current vars are to initialise the clock vars. First referenced in main loop
currentTimeOld = 0
currentSecond = 0
disp.begin()
disp.clear()
disp.display()
width = disp.width
height = disp.height
#critical to have a '1' here for 1-bit color (on/off, no dimming)
image = Image.new('1', (width, height))
draw = ImageDraw.Draw(image)
#blanking drawing, full width and height no-fill rectangle
draw.rectangle((0,0,width,height), outline=0, fill=0)
font = ImageFont.load_default()
# args: left position, top start position, width, bottom position
#draw.rectangle((2, 2, 102, height-2), outline=85, fill=0)
# Write two lines of text.
def firstDisplayDraw():
    """Draw the static layout: clock placeholder plus the weather summary."""
    draw.text((4, 1), '00:00.00 ' + 'Cur:' + str(int(temp['temp'])), font=font, fill=255)
    draw.text((4, 10), 'Hi:' + str(int(temp['temp_max'])) + 'c Lo:' + str(int(temp['temp_min'])) + 'c', font=font, fill=255)
    draw.text((4, 19), 'Wnd:' + str(int(wind['speed'])) + 'kph ' + str(dstatus), font=font, fill=255)
    disp.image(image)
    disp.display()
def timeupdate():
    """Blank the clock area and redraw the global currentTime string."""
    draw.rectangle((4,1,60,9), outline=0, fill=0)
    draw.text((4, 1), str(currentTime), font=font, fill=255)
    disp.image(image)
    disp.display()
firstDisplayDraw()

# Main loop: rebuild and redraw the time string once per second.
while True:
    #START TIME VAR ESTABLISHMENT
    now = datetime.now()
    if int(currentSecond) != now.second:
        # NOTE(review): fixed UTC-5 offset with no DST handling — confirm.
        currentHour = now.hour - 5
        if currentHour < 1:
            currentHour += 12
        # NOTE(review): values 13-23 are not folded to 1-11, so afternoon
        # hours display in 24-hour form — confirm the intended clock format.
        if len(str(currentHour)) == 1:
            currentHour = ' ' + str(currentHour)  # pad to two chars
        currentMinute = now.minute
        if len(str(currentMinute)) == 1:
            currentMinute = '0' + str(currentMinute)
        currentSecond = now.second
        if len(str(currentSecond)) == 1:
            currentSecond = '0' + str(currentSecond)
        currentTime = str(currentHour) + ':' + str(currentMinute) + '.' + str(currentSecond)
        if currentTime != currentTimeOld:
            currentTimeOld = currentTime
            timeupdate()
    # inputForInterrupt = input('enter some text but plz don\'t hit enter for a few minutes')
|
#!/usr/local/bin/python3
# Test Modules
import sys
import pytest
from pytest import approx
from os import path
# Import module under test
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from python_package_template.constants import *
# Other imports
import math
def test_constants():
    """Verify the package-level constant keeps its documented value."""
    expected = 42
    assert MY_DUMMY_CONSTANT == expected
# Allow running this test module directly without pytest.
if __name__ == '__main__':
    test_constants()
import math
class Robot:
    """Model of a 3-servo robot arm: servo angle/pulse-width conversion,
    arm geometry, reachable-area checks and a position->angles database.

    NOTE(review): the nesting below (Geometry inside Servo, Entry inside
    Database) is inferred from how `Robot.Servo.Geometry(...)` is referenced
    in `Database.get_entry` — confirm against the original layout.
    """

    class Servo:
        """One servo joint: angle in degrees/radians plus ms pulse mapping."""

        def __init__(self, angle, geometry):
            self.deg = angle
            self.rad = self._get_rad(self.deg)
            self.geometry = geometry  # pulse-width calibration (max/min/mid, ms)
            # Angle limits derived from the calibrated pulse-width extremes.
            self.max, self.min = self._get_max_min()
            self.previous_servos = None   # chain of servos closer to the base
            self.total_angle_deg = None   # cumulative angle incl. previous joints
            self.total_angle_rad = None
            self.delta = None             # |angle| term used for efficiency scoring
            self.ms = None                # last pulse width, see set_ms/get_ms

        def add_previous_servos(self, previous_servos=None):
            """Attach the servos preceding this joint and compute totals."""
            self.previous_servos = previous_servos
            self.total_angle_deg = self.deg
            if previous_servos:
                for servo in self.previous_servos:
                    self.total_angle_deg += servo.deg
                # delta = |own angle + immediate predecessor's angle|
                self.delta = self.deg + previous_servos[-1].deg
                if self.delta < 0:
                    self.delta *= -1
            else:
                self.delta = self.deg
            self.total_angle_rad = self._get_rad(self.total_angle_deg)

        def _get_rad(self, deg):
            # Degrees -> radians.
            rad = deg * math.pi / 180
            return rad

        def calc_previous(self):
            """Recompute cumulative angle and delta after a move.

            NOTE(review): unlike add_previous_servos, the branch with previous
            servos starts the total at 0 and does NOT include self.deg —
            looks inconsistent with add_previous_servos; confirm intent.
            """
            if self.previous_servos:
                self.total_angle_deg = 0
                for servo in self.previous_servos:
                    self.total_angle_deg += servo.deg
                self.delta = self.deg + self.previous_servos[-1].deg
                if self.delta < 0:
                    self.delta *= -1
            else:
                self.delta = self.deg
                self.total_angle_deg = self.deg
                if self.delta < 0:
                    self.delta *= -1
            self.total_angle_rad = self._get_rad(self.total_angle_deg)

        def set_angle(self, angle):
            """Set the joint angle in degrees and refresh derived values."""
            self.deg = angle
            self.rad = self._get_rad(self.deg)
            self.calc_previous()

        def _get_angle(self, ms):
            """Convert a pulse width (ms) to an angle via the calibration."""
            val_min = self.geometry.min
            val_max = self.geometry.max
            val_mid = self.geometry.mid
            change = 1 / 90  # ms per degree; sign flips for reversed servos
            if val_min > val_max:
                change *= -1
            angle = (ms - val_mid) / change
            return angle

        def _get_max_min(self):
            # Angle limits corresponding to the calibrated pulse extremes.
            return self._get_angle(self.geometry.max), self._get_angle(self.geometry.min)

        def set_ms(self, ms):
            """Set the raw pulse width and derive the matching angle."""
            self.ms = ms
            self.deg = self._get_angle(self.ms)
            self.rad = self._get_rad(self.deg)
            self.calc_previous()

        def get_ms(self, calc=False):
            """Return the pulse width; recompute from the angle when calc=True."""
            if calc:
                change = 1 / 90
                if self.geometry.min > self.geometry.max:
                    change *= -1
                self.ms = (self.deg * change + self.geometry.mid)
            return self.ms

        class Geometry:
            """Pulse-width calibration for one servo (values in ms)."""

            def __init__(self, _max, _min, mid):
                self.max = _max
                self.min = _min
                self.mid = mid

            def is_inside(self, ms):
                # True when ms lies within the range, whichever way round
                # the min/max calibration happens to be.
                return self.min <= ms <= self.max or self.max <= ms <= self.min

    class Arm:
        """Rigid link attached to a servo; length plus perpendicular height."""

        def __init__(self, attatched_to, length, height=0.0):
            # NOTE(review): both spellings ('attatched_to' parameter,
            # 'attachted_to' attribute) are typos kept for compatibility.
            self.attachted_to = attatched_to
            self.length = length
            self.height = height

    class CoordinateSystem:
        """Axis-aligned rectangle of valid (x, y) positions."""

        def __init__(self, x, y):
            self.xmin = x[0]
            self.xmax = x[1]
            self.ymin = y[0]
            self.ymax = y[1]

        def is_inside(self, x, y):
            return self.xmin <= x <= self.xmax and self.ymin <= y <= self.ymax

    class Database:
        """Stores the best-known servo configuration per (x, y) position."""

        class Entry:
            def __init__(self, x, y, effi, s1, s2, s3):
                self.x = x
                self.y = y
                self.effi = effi  # efficiency score (see add_entry for ordering)
                # Only the angles are kept, not the servo objects.
                self.s1 = s1.deg
                self.s2 = s2.deg
                self.s3 = s3.deg

            def pos_is_equal_to(self, other_entry):
                return self.x == other_entry.x and self.y == other_entry.y

            def other_efficency_better(self, other_entry):
                # NOTE(review): 'better' here means the *other* entry has the
                # smaller effi value — confirm the intended ordering.
                return self.effi > other_entry.effi

        def __init__(self):
            self.database = []  # flat list of Entry objects

        def _is_contained(self, entry):
            """Return (existing_entry, index) matching entry's position,
            or (None, 0) when absent."""
            for i, existing_entry in enumerate(self.database):
                if existing_entry.pos_is_equal_to(entry):
                    return existing_entry, i
            return None, 0

        def get_entry(self, x, y):
            # Probe with a dummy entry (zeroed servos) to reuse _is_contained.
            wanted = self.Entry(x, y, 0, Robot.Servo(0, Robot.Servo.Geometry(0, 0, 0)),
                                Robot.Servo(0, Robot.Servo.Geometry(0, 0, 0)),
                                Robot.Servo(0, Robot.Servo.Geometry(0, 0, 0)))
            _return, i = self._is_contained(wanted)
            return _return

        def add_entry(self, entry):
            """Insert entry, replacing an existing one at the same position
            when the new entry's effi is lower."""
            contained, i = self._is_contained(entry)
            if contained:
                if contained.other_efficency_better(entry):
                    self.database[i] = entry
            else:
                self.database.append(entry)

        def serialize(self):
            """Dump all entries as 'x,y:s1,s2,s3' lines (effi is not stored)."""
            string = ""
            for entry in self.database:
                string += "{},{}:{},{},{}\n".format(str(entry.x), str(entry.y), str(entry.s1), str(entry.s2),
                                                    str(entry.s3))
            return string

        def deserialize(self, string):
            """Parse serialize() output; restored entries get effi = -1."""
            entries = string.split("\n")
            for entry in entries:
                if entry:
                    coordinates = entry.split(":")[0].split(',')
                    servos = entry.split(":")[1].split(',')
                    new = self.Entry(float(coordinates[0]), float(coordinates[1]), -1,
                                     Robot.Servo(float(servos[0]), Robot.Servo.Geometry(0, 0, 0)),
                                     Robot.Servo(float(servos[1]), Robot.Servo.Geometry(0, 0, 0)),
                                     Robot.Servo(float(servos[2]), Robot.Servo.Geometry(0, 0, 0)))
                    self.database.append(new)

    def __init__(self, s1, s2, s3, coordinatessystem):
        self.servo1 = s1
        self.servo2 = s2
        self.servo3 = s3
        self.coordinatesystem = coordinatessystem
        self.arm1 = None  # arms are attached later via init_depending
        self.arm2 = None
        self.arm3 = None
        self.data = self.Database()
        self.x = None  # current end-effector position, set by init_depending
        self.y = None
        self.efficency = None

    def init_depending(self, a1, a2, a3, s1_prev=None, s2_prev=None, s3_prev=None):
        """Attach the arms and servo chains, then compute pose and efficiency."""
        self.arm1 = a1
        self.arm2 = a2
        self.arm3 = a3
        self.servo1.add_previous_servos(s1_prev)
        self.servo2.add_previous_servos(s2_prev)
        self.servo3.add_previous_servos(s3_prev)
        self.x, self.y = self._get_position()
        self.efficency = self._get_efficency()

    def _get_position(self):
        """Forward kinematics: sum each arm's contribution — length along the
        joint's cumulative angle, height rotated 90 degrees from it."""
        x = 0
        y = 0
        for arm in [self.arm1, self.arm2, self.arm3]:
            # 1.5708 ~= pi/2 (perpendicular offset for the arm height)
            x += math.sin(arm.attachted_to.total_angle_rad) * arm.length + math.sin(
                arm.attachted_to.total_angle_rad + 1.5708) * arm.height
            y += math.cos(arm.attachted_to.total_angle_rad) * arm.length + math.cos(
                arm.attachted_to.total_angle_rad + 1.5708) * arm.height
        return x, y

    def _get_efficency(self):
        """Weighted sum of joint deltas; base joints cost more (8/4/2)."""
        effi = 0
        multipliers = [8, 4, 2]
        for i, servo in enumerate([self.servo1, self.servo2, self.servo3]):
            effi += servo.delta * multipliers[i]
        return effi

    def calculate(self):
        """Refresh totals, pose and efficiency after servo angles changed."""
        for servo in [self.servo1, self.servo2, self.servo3]:
            servo.calc_previous()
        self.x, self.y = self._get_position()
        self.efficency = self._get_efficency()
        self.x = self._round(self.x)
        self.y = self._round(self.y)

    def _round(self, number, n=None):
        """Round to n decimal digits toward the nearer integer side.

        NOTE(review): `digits` is the fractional part, always in [0, 1), so
        the `digits < 2` branch always wins and `digits > 8` is unreachable —
        the thresholds were probably meant to be 0.2 / 0.8 (and the gap in
        between would return None). Left untouched here.
        """
        if not n:
            n = 0
        digits = number - math.floor(number)
        if digits < 2:
            return math.floor(number * 10 ** n) / 10 ** n
        elif digits > 8:
            return math.ceil(number * 10 ** n) / 10 ** n
|
import numpy as np
import tensorflow as tf
from tensorflow.python.layers import core as layers_core
import RAKE, math, random
from zpar import ZPar
from data import array_data
import torch, sys,os
import pickle as pkl
from copy import copy
from bert.bertinterface import BertEncoding, BertSimilarity
from utils import get_corpus_bleu_scores, appendtext
def output_p(sent, model):
    """Run the language model on the token-id list `sent` and return the
    per-step distributions as a numpy array of shape (seq_len, vocab)."""
    tokens = torch.tensor(sent, dtype=torch.long).cuda()
    prediction = model.predict(tokens)  # shape: 1, seq_len, vocab
    return prediction.squeeze(0).cpu().detach().numpy()
def keyword_pos2sta_vec(option,keyword, pos):
    """Build the 0/1 keyword-constraint vector from keyword flags + POS tags.

    A position is marked when it is a proper noun ('NNP'), carries a keyword
    flag, or is a common noun / 3rd-person verb ('NN'/'NNS'/'VBZ').  At most
    max(int(max_key_rate * len(pos)), max_key) earliest positions are kept.

    Args:
        option: config with num_steps, max_key_rate and max_key.
        keyword: per-token 0/1 keyword flags.
        pos: per-token POS tags (truncated to num_steps - 1).
    Returns:
        list of 0/1 ints, same length as `keyword`.
    """
    tags = pos[:option.num_steps - 1]
    # The original if/elif chain appended the index in every branch, so the
    # whole thing collapses into one predicate.
    marked = [
        idx for idx in range(len(tags))
        if tags[idx] == 'NNP' or keyword[idx] == 1 or tags[idx] in ('NN', 'NNS', 'VBZ')
    ]
    limit = max(int(option.max_key_rate * len(tags)), option.max_key)
    kept = set(marked[:limit])
    return [1 if idx in kept else 0 for idx in range(len(keyword))]
def read_data_use(option, sen2id):
    """Load sentences from option.use_data_path and build keyword vectors.

    For each line: run RAKE keyword extraction and ZPar POS tagging, mark the
    token positions covered by (possibly multi-word) keywords, then convert
    them into the final 0/1 constraint vector via keyword_pos2sta_vec.

    Args:
        option: config (use_data_path, num_steps, dict_size, pos_path,
            keyword_pos, max_key_rate, max_key).
        sen2id: callable mapping a token list to vocabulary ids.
    Returns:
        (array_data container of id sequences, list of 0/1 keyword vectors).
    """
    file_name = option.use_data_path
    max_length = option.num_steps
    dict_size = option.dict_size
    Rake = RAKE.Rake(RAKE.SmartStopList())
    z=ZPar(option.pos_path)
    tagger = z.get_tagger()
    with open(file_name) as f:
        data=[]
        vector=[]  # NOTE(review): never used
        sta_vec_list=[]
        j=0        # NOTE(review): never used
        for line in f:
            sta_vec=list(np.zeros([option.num_steps-1]))
            keyword=Rake.run(line.strip())
            pos_list=tagger.tag_sentence(line.strip()).split()
            # pos=zip(*[x.split('/') for x in pos_list])[0]
            # NOTE(review): [0] takes the FIRST field of each 'a/b' pair —
            # verify the tagger's output format puts the tag (not the word)
            # first, since keyword_pos2sta_vec compares against tag names.
            pos=list(zip(*[x.split('/') for x in pos_list]))[0]
            if keyword!=[]:
                # RAKE returns (phrase, score) pairs; keep only the phrases.
                keyword=list(list(zip(*keyword))[0])
                keyword_new=[]
                linewords = line.strip().split()
                # Mark every token index covered by a keyword phrase.
                for i in range(len(linewords)):
                    for item in keyword:
                        length11 = len(item.split())
                        if ' '.join(linewords[i:i+length11])==item:
                            keyword_new.extend([i+k for k in range(length11)])
                for i in range(len(keyword_new)):
                    ind=keyword_new[i]
                    # Ignore positions beyond the model's step budget.
                    if ind<=option.num_steps-2:
                        sta_vec[ind]=1
            if option.keyword_pos==True:
                sta_vec_list.append(keyword_pos2sta_vec(option,sta_vec,pos))
            else:
                sta_vec_list.append(list(np.zeros([option.num_steps-1])))
            data.append(sen2id(line.strip().lower().split()))
    data_new=array_data(data, max_length, dict_size)
    return data_new, sta_vec_list # sentence, keyvector
def read_data_use1(option, sen2id):
    """Verbose variant of read_data_use (debug prints, simpler keyword
    matching).

    Differences from read_data_use: keyword token positions are found via
    list.index() on single words — so only the FIRST occurrence of each
    keyword word is marked, and multi-word phrases are matched word by word.

    Returns:
        (array_data container of id sequences, list of 0/1 keyword vectors).
    """
    file_name = option.use_data_path
    max_length = option.num_steps
    dict_size = option.dict_size
    Rake = RAKE.Rake(RAKE.SmartStopList())
    z=ZPar(option.pos_path)
    tagger = z.get_tagger()
    with open(file_name) as f:
        data=[]
        vector=[]  # NOTE(review): never used
        sta_vec_list=[]
        j=0        # NOTE(review): never used
        for line in f:
            print('sentence:'+line)
            sta_vec=list(np.zeros([option.num_steps-1]))
            keyword=Rake.run(line.strip())
            pos_list=tagger.tag_sentence(line.strip()).split()
            # pos=zip(*[x.split('/') for x in pos_list])[0]
            # NOTE(review): same field-order caveat as in read_data_use.
            pos=list(zip(*[x.split('/') for x in pos_list]))[0]
            print(keyword)
            if keyword!=[]:
                keyword=list(list(zip(*keyword))[0])
                keyword_new=[]
                for item in keyword:
                    # First occurrence only (list.index semantics).
                    tem1=[line.strip().split().index(x) for x in item.split() if x in line.strip().split()]
                    print('id',tem1)
                    keyword_new.extend(tem1)
                print(keyword_new)
                for i in range(len(keyword_new)):
                    ind=keyword_new[i]
                    if ind<=option.num_steps-2:
                        sta_vec[ind]=1
            if option.keyword_pos==True:
                sta_vec_list.append(keyword_pos2sta_vec(option,sta_vec,pos))
            else:
                sta_vec_list.append(list(np.zeros([option.num_steps-1])))
            print(keyword_pos2sta_vec(option,sta_vec, pos))
            data.append(sen2id(line.strip().lower().split()))
    data_new=array_data(data, max_length, dict_size)
    return data_new, sta_vec_list # sentence, keyvector
def choose_action(c):
    """Sample an index from the probability vector `c`.

    Draws r ~ U[0, 1) and returns the first index whose cumulative
    probability reaches r (inverse-CDF sampling).

    Args:
        c: sequence of non-negative probabilities, expected to sum to ~1.
    Returns:
        int index in [0, len(c)).
    """
    r = np.random.random()
    cumulative = np.cumsum(np.array(c))
    for i, threshold in enumerate(cumulative):
        if threshold >= r:
            return i
    # Fix: floating-point error can leave the total just below r; the
    # original fell off the loop and returned None, crashing callers that
    # index with the result. Fall back to the last index.
    return len(cumulative) - 1
def sigma_word(x):
    """Shape a keyword-similarity score: identity above 0.7, a steep linear
    ramp on (0.65, 0.7], and zero at or below 0.65."""
    if x <= 0.65:
        return 0
    if x <= 0.7:
        return (x - 0.65) * 14  # ramps 0 -> 0.7 across the band
    return x
def sigma_word1(x):
    """Stricter variant of sigma_word: identity above 0.9, linear ramp on
    (0.8, 0.9], zero at or below 0.8."""
    if x > 0.9:
        return x
    return (x - 0.8) * 9 if x > 0.8 else 0
def sigma_word_bert(x):
    """Vectorised (torch) version of the sigma_word1 shaping for a batch of
    similarity scores.

    Args:
        x: 1-D tensor of similarity scores, shape (K,).
    Returns:
        tensor of shaped scores, same shape.

    NOTE(review): for x > 0.9 BOTH masks are 1, so this returns
    x + (x - 0.8) * 9 rather than the plain x that the scalar sigma_word1
    returns — possibly unintended double counting; confirm before relying
    on exact values.
    """
    # x:K,
    x9 = torch.gt(x,0.9).float()
    x8 = torch.gt(x,0.8).float()
    return x*x9+(x-0.8)*9*x8
def sigma_bleu(x):
    """Map a BLEU score to a diversity reward: near-copies (> 0.9) earn
    almost nothing, scores in (0.8, 0.9] are penalised linearly, and lower
    scores get the full reward of 1."""
    if x <= 0.8:
        return 1
    if x <= 0.9:
        return 1 - (x - 0.8) * 9  # ramps 1 -> 0.1 across the band
    return 1 - x + 0.01  # 0.1 -> 0.01 as x approaches 1
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); works elementwise on arrays."""
    return np.reciprocal(1.0 + np.exp(-x))
def sen2mat(s, id2sen, emb_word, option):
    """Embed a token-id sequence into a (length, hidden_size) matrix.

    The start marker id (dict_size + 2) is skipped, the end marker id
    (dict_size + 1) terminates the sequence, and out-of-vocabulary words
    fall back to a random vector.
    """
    rows = []
    for token_id in s:
        if token_id == option.dict_size + 2:
            continue  # skip the BOS marker
        if token_id == option.dict_size + 1:
            break  # EOS: ignore everything after it
        word = id2sen([token_id])[0]
        vector = (np.array(emb_word[word]) if word in emb_word
                  else np.random.random([option.hidden_size]))
        rows.append(vector)
    return np.array(rows)
def similarity_semantic(s1_list,s2, sta_vec, id2sen, emb_word, option, model):
    """Semantic similarity of each candidate in s1_list to the source s2
    using sentence encodings from `model` (a BERT-style similarity model).

    Cosine similarity is damped by the relative norm gap and sharpened by
    raising to the K-th power.

    Args:
        s1_list: candidate sentences as token-id lists.
        s2: source sentence ids.
        sta_vec, emb_word: unused here (kept for the shared similarity API).
    Returns:
        numpy array with one score per candidate.
    """
    K = 4  # sharpening exponent
    sourcesent = [' '.join(id2sen(s1)) for s1 in s1_list]
    sourcesent2 = [' '.join(id2sen(s2))] * len(s1_list)
    # NOTE(review): rep1 averages the candidate self-encoding with the source
    # self-encoding; rep2 encodes the (candidate, source) pair — confirm this
    # matches get_encoding's contract.
    rep1 = model.get_encoding(sourcesent, sourcesent)
    rep2 = model.get_encoding(sourcesent,sourcesent2)
    rep3 = model.get_encoding(sourcesent2,sourcesent2)
    rep1 = (rep1+rep3)/2
    norm1 = rep1.norm(2,1)
    norm2 = rep2.norm(2,1)
    # cosine similarity per candidate
    semantic = torch.sum(rep1*rep2,1)/(norm1*norm2)
    # penalise a norm mismatch between the two representations
    semantic = semantic*(1- (torch.abs(norm1-norm2)/torch.max(norm1,norm2)))
    semantics = semantic.cpu().numpy()
    res = np.power(semantics,K)
    return res
def similarity_semantic_bleu(s1_list,s2, sta_vec, id2sen, emb_word, option, model):
    """Semantic similarity (as similarity_semantic, sharper K=12) combined
    with an inverted-BLEU factor rewarding wording that differs from the
    source.

    Returns:
        numpy array of per-candidate scores: sigmoid-inverted BLEU times the
        K-th power of the damped cosine similarity.
    """
    K = 12  # sharpening exponent (stricter than similarity_semantic's 4)
    sourcesent = [' '.join(id2sen(s1)) for s1 in s1_list]
    sourcesent2 = [' '.join(id2sen(s2))] * len(s1_list)
    rep1 = model.get_encoding(sourcesent, sourcesent)
    rep2 = model.get_encoding(sourcesent,sourcesent2)
    rep3 = model.get_encoding(sourcesent2,sourcesent2)
    rep1 = (rep1+rep3)/2
    norm1 = rep1.norm(2,1)
    norm2 = rep2.norm(2,1)
    # damped cosine similarity, as in similarity_semantic
    semantic = torch.sum(rep1*rep2,1)/(norm1*norm2)
    semantic = semantic*(1- (torch.abs(norm1-norm2)/torch.max(norm1,norm2)))
    semantics = semantic.cpu().numpy()
    bleus = []
    for s1 in s1_list:
        # NOTE(review): the reference list is duplicated len(s1_list) times
        # while only one hypothesis is scored per call — confirm this matches
        # get_corpus_bleu_scores' expected shapes. Index [1] = BLEU-2.
        actual_word_lists = [[id2sen(s2)]*len(s1_list)]
        generated_word_lists = [id2sen(s1)]
        bleu_score = get_corpus_bleu_scores(actual_word_lists, generated_word_lists)[1]
        bleus.append(bleu_score)
    # higher BLEU (closer to the source) -> smaller reward
    bleus = (1.0-sigmoid(np.minimum(bleus,0.999)))
    semantics = np.power(semantics,K)
    res = bleus*semantics
    return res
def similarity_semantic_keyword(s1_list,s2, sta_vec, id2sen, emb_word, option, model):
    """similarity_semantic score multiplied by a C1 penalty for every source
    keyword token (sta_vec position == 1) missing from a candidate.

    Returns:
        numpy array of per-candidate penalised scores.
    """
    C1 = 0.1  # multiplicative penalty per missing keyword token
    K = 4     # sharpening exponent
    sourcesent = [' '.join(id2sen(s1)) for s1 in s1_list]
    sourcesent2 = [' '.join(id2sen(s2))] * len(s1_list)
    rep1 = model.get_encoding(sourcesent, sourcesent)
    rep2 = model.get_encoding(sourcesent,sourcesent2)
    rep3 = model.get_encoding(sourcesent2,sourcesent2)
    rep1 = (rep1+rep3)/2
    norm1 = rep1.norm(2,1)
    norm2 = rep2.norm(2,1)
    # damped cosine similarity, as in similarity_semantic
    semantic = torch.sum(rep1*rep2,1)/(norm1*norm2)
    semantic = semantic*(1- (torch.abs(norm1-norm2)/torch.max(norm1,norm2)))
    semantics = semantic.cpu().numpy()
    res = np.power(semantics,K)
    semantics = []
    for s, s1 in zip(res, s1_list):
        tem = 1
        # Each keyword-flagged source token id absent from the candidate
        # multiplies the score by C1.
        for i,x in zip(sta_vec,s2):
            if i==1 and x not in s1:
                tem *= C1
        semantics.append(s*tem)
    res = np.array(semantics)
    return res
def similarity_keyword(s1_list, s2, sta_vec, id2sen, emb_word, option, model = None):
    """Keyword-preservation score: for each candidate, take the worst (over
    source keyword positions) best-cosine-match against any candidate word
    embedding, then shape it with sigma_word.

    Args:
        s1_list: candidate sentences (token-id lists).
        s2: source sentence ids.
        sta_vec: 0/1 keyword-constraint vector for s2.
        model: unused (API parity with the BERT variants).
    Returns:
        numpy array with one score per candidate.
    """
    e=1e-5  # guards against division by zero for zero-norm embeddings
    sims= []
    for s1 in s1_list:
        emb1=sen2mat(s1, id2sen, emb_word, option) # M*K
        #wei2=normalize( np.array([-np.log(id2freq[x]) for x in s2 if x<=config.dict_size]))
        emb2=sen2mat(s2, id2sen, emb_word, option) # N*k
        # keyword weights restricted to the embedded length
        wei2=np.array(sta_vec[:len(emb2)]).astype(np.float32) # N*1
        #wei2=normalize(wei2)
        emb_mat=np.dot(emb2,emb1.T) #N*M
        # diagonal inverse-norm matrices turn emb_mat into cosine similarities
        norm1=np.diag(1/(np.linalg.norm(emb1,2,axis=1)+e)) # M*M
        norm2=np.diag(1/(np.linalg.norm(emb2,2,axis=1)+e)) #N*N
        sim_mat=np.dot(norm2,emb_mat).dot(norm1) #N*M
        sim_vec=sim_mat.max(axis=1) #N  best candidate match per source word
        # debug
        # print('sss',sim_vec)
        # print(wei2)
        # sim=min([x for x in list(sim_vec*wei2) if x>0]+[1])
        # the worst-matched keyword position decides; 1 when no keywords
        sim=min([x for x,y in zip(list(sim_vec*wei2),list(wei2)) if y>0]+[1])
        sim = sigma_word(sim)
        sims.append(sim)
    res = np.array(sims)
    return res
def similarity_batch_word(s1, s2, sta_vec, option):
    # NOTE(review): `similarity_word` is not defined anywhere in this file,
    # so calling this raises NameError; this same function is also redefined
    # several times below — only the last definition survives import.
    return np.array([ similarity_word(x,s2,sta_vec, option) for x in s1 ])
def similarity_keyword_batch(s1_lists, s2s, sta_vecs, id2sen, emb_word, option, model = None):
    """Apply similarity_keyword to each (candidates, source, keyword-vector)
    triple and return the list of per-candidate score arrays."""
    return [
        similarity_keyword(candidates, source, keywords, id2sen, emb_word, option, model)
        for candidates, source, keywords in zip(s1_lists, s2s, sta_vecs)
    ]
def similarity_batch_word(s1, s2, sta_vec, option):
    # NOTE(review): duplicate of the definition above; `similarity_word`
    # is still undefined, so any call raises NameError.
    return np.array([ similarity_word(x,s2,sta_vec, option) for x in s1 ])
def similarity_keyword_tensor(s1_list, s2, sta_vec, id2sen, emb_word, option, model = None):
    """GPU-batched variant of similarity_keyword using torch (requires CUDA).

    All candidates are scored in one batched matmul; the minimum cosine match
    over keyword positions is shaped with sigma_word_bert.

    NOTE(review): assumes every candidate in s1_list embeds to the same
    length M (sen2mat results are stacked) — confirm for the calling code.
    Returns:
        numpy array with one score per candidate.
    """
    e=1e-5  # guards against division by zero
    N_candidant = len(s1_list)
    sims= []
    embs = []
    for s1 in s1_list:
        emb1=sen2mat(s1, id2sen, emb_word, option) # M*K
        embs.append(np.expand_dims(emb1,axis=0))
    emb1 = np.concatenate(embs,0) # K,8,300
    emb1 = torch.tensor(emb1, dtype=torch.float).permute(0,2,1).cuda()
    emb2= sen2mat(s2, id2sen, emb_word, option) # N*k
    emb2 = torch.tensor(emb2, dtype=torch.float).unsqueeze(0).repeat(N_candidant,1,1).cuda()
    # print(emb1.size(), emb2.size()) #bs,300,7, bs,8,300
    # keyword mask; leading 0 drops the first position (uint8 = boolean index)
    wei2= torch.tensor([0]+sta_vec[:emb2.size(1)-1],dtype=torch.uint8) #8
    emb_mat = torch.bmm(emb2,emb1) # K,8,7
    norm2 = 1/(torch.norm(emb2,p= 2,dim=2)+e) # K,8,8
    norm1 = 1/(torch.norm(emb1,p= 2,dim=1)+e) # K,7,7
    norm2 = torch.diag_embed(norm2) # K,15,15
    norm1 = torch.diag_embed(norm1)
    sim_mat = torch.bmm(torch.bmm(norm2, emb_mat), norm1) # K,8,7  cosine sims
    sim_vec,_ = torch.max(sim_mat,2) # K,8  best match per source word
    # worst-matched keyword position per candidate
    sim,_ = torch.min(sim_vec[:,wei2],1)
    sim = sigma_word_bert(sim)
    return sim.cpu().numpy()
def similarity_keyword_bleu(s1_list, s2, sta_vec, id2sen, emb_word, option, model = None):
    """similarity_keyword score multiplied by an inverted BLEU-4 factor that
    rewards candidates whose wording differs from the source.

    Returns:
        numpy array with one combined score per candidate.
    """
    e=1e-5  # guards against division by zero
    sims= []
    for s1 in s1_list:
        # --- identical keyword-similarity computation to similarity_keyword ---
        emb1=sen2mat(s1, id2sen, emb_word, option) # M*K
        #wei2=normalize( np.array([-np.log(id2freq[x]) for x in s2 if x<=config.dict_size]))
        emb2=sen2mat(s2, id2sen, emb_word, option) # N*k
        wei2=np.array(sta_vec[:len(emb2)]).astype(np.float32) # N*1
        #wei2=normalize(wei2)
        emb_mat=np.dot(emb2,emb1.T) #N*M
        norm1=np.diag(1/(np.linalg.norm(emb1,2,axis=1)+e)) # M*M
        norm2=np.diag(1/(np.linalg.norm(emb2,2,axis=1)+e)) #N*N
        sim_mat=np.dot(norm2,emb_mat).dot(norm1) #N*M
        sim_vec=sim_mat.max(axis=1) #N
        # debug
        # print('sss',sim_vec)
        # print(wei2)
        # sim=min([x for x in list(sim_vec*wei2) if x>0]+[1])
        sim=min([x for x,y in zip(list(sim_vec*wei2),list(wei2)) if y>0]+[1])
        sim = sigma_word(sim)
        sims.append(sim)
    bleus = []
    for s1 in s1_list:
        # NOTE(review): reference list duplicated len(s1_list) times while a
        # single hypothesis is scored — confirm get_corpus_bleu_scores'
        # expected shapes. Index [3] = BLEU-4.
        actual_word_lists = [[id2sen(s2)]*len(s1_list)]
        generated_word_lists = [id2sen(s1)]
        bleu_score = get_corpus_bleu_scores(actual_word_lists, generated_word_lists)[3]
        bleus.append(bleu_score)
    # bleus = (1.0-sigmoid(np.minimum(bleus,0.9999)))
    # higher BLEU (closer to source wording) -> smaller multiplier
    bleus = (1.0-np.minimum(bleus,0.99))
    res = np.array(sims)*bleus
    return res
def similarity_keyword_bert(s1_list, s2, sta_vec, id2sen, emb_word, option, model = None):
    """Keyword similarity computed on contextual BERT token representations
    instead of static word embeddings.

    The source sentence is appended to the candidate batch, encoded once via
    model.get_representation, and each candidate's tokens are matched against
    the source's keyword positions; the worst match (shaped by
    sigma_word_bert) is the score.

    Returns:
        numpy array with one score per candidate.
    """
    e=1e-5  # guards against division by zero
    sims= []
    sourcesent = [' '.join(id2sen(s1)) for s1 in s1_list]
    sourcesent2 = [' '.join(id2sen(s2))]
    # last row of the batch is the source sentence
    sourcesent = sourcesent+sourcesent2
    emb = model.get_representation(sourcesent)
    N_candidant = len(s1_list)
    emb2 = emb[-1,:,:].unsqueeze(0).repeat(N_candidant,1,1) # K,15*d  source
    emb1 = emb[:-1,:,:].permute(0,2,1) #K,d,15  candidates
    # keyword mask; leading 0 skips the first position (uint8 = boolean index)
    wei2= torch.tensor([0]+sta_vec,dtype=torch.uint8)
    emb_mat = torch.bmm(emb2,emb1) # K,15,15
    norm2 = 1/(torch.norm(emb2,p= 2,dim=2)+e) # K,15
    norm1 = 1/(torch.norm(emb1,p= 2,dim=1)+e) # K,15
    norm2 = torch.diag_embed(norm2) # K,15,15
    norm1 = torch.diag_embed(norm1)
    sim_mat = torch.bmm(torch.bmm(norm2, emb_mat), norm1) # K,15,15 cosine sims
    sim_vec,_ = torch.max(sim_mat,2) # K,15 best match per source position
    # worst-matched keyword position per candidate
    sim,_ = torch.min(sim_vec[:,wei2],1)
    sim = sigma_word_bert(sim)
    return sim.cpu().numpy()
def similarity_batch_word(s1, s2, sta_vec, option):
    # NOTE(review): third identical redefinition; `similarity_word` remains
    # undefined, so any call raises NameError.
    return np.array([ similarity_word(x,s2,sta_vec, option) for x in s1 ])
def similarity_keyword_bert_bleu(s1_list, s2, sta_vec, id2sen, emb_word, option, model = None):
    """BERT-representation keyword similarity (as similarity_keyword_bert but
    in numpy, with the stricter sigma_word1 shaping) multiplied by a
    sigma_bleu diversity factor per candidate.

    Returns:
        numpy array with one combined score per candidate.
    """
    e=1e-5  # guards against division by zero
    sims= []
    sourcesent = [' '.join(id2sen(s1)) for s1 in s1_list]
    sourcesent2 = [' '.join(id2sen(s2))]
    # last row of the encoded batch is the source sentence
    sourcesent = sourcesent+sourcesent2
    emb = model.get_representation(sourcesent).numpy()
    emb2 = emb[-1,:,:]  # source representation
    actual_word_lists = [[id2sen(s2)]]
    bleus = []
    for i,s1 in enumerate(s1_list):
        emb1 = emb[i,:,:]
        # keyword mask; leading 0 skips the first position
        wei2=np.array([0]+sta_vec).astype(np.float32) # N*1
        #wei2=normalize(wei2)
        emb_mat=np.dot(emb2,emb1.T) #N*M
        norm1=np.diag(1/(np.linalg.norm(emb1,2,axis=1)+e)) # M*M
        norm2=np.diag(1/(np.linalg.norm(emb2,2,axis=1)+e)) #N*N
        sim_mat=np.dot(norm2,emb_mat).dot(norm1) #N*M cosine similarities
        sim_vec=sim_mat.max(axis=1) #N best match per source position
        # debug
        # print('sss',sim_vec)
        # print(wei2)
        # sim=min([x for x in list(sim_vec*wei2) if x>0]+[1])
        # worst-matched keyword position decides; 1 when no keywords
        sim=min([x for x,y in zip(list(sim_vec*wei2),list(wei2)) if y>0]+[1])
        sim = sigma_word1(sim)
        sims.append(sim)
        generated_word_lists = [id2sen(s1)]
        # Index [3] = BLEU-4; sigma_bleu rewards wording changes.
        bleu_score = get_corpus_bleu_scores(actual_word_lists, generated_word_lists)[3]
        bleu_score = sigma_bleu(bleu_score)
        bleus.append(bleu_score)
    # bleus = (1.0-sigmoid(np.minimum(bleus,0.9999)))
    res = np.array(sims)*np.array(bleus)
    return res
def similarity_batch_word(s1, s2, sta_vec, option):
    # NOTE(review): fourth identical redefinition (this one wins at import
    # time); `similarity_word` is still undefined -> NameError if called.
    return np.array([ similarity_word(x,s2,sta_vec, option) for x in s1 ])
def cut_from_point(input, sequence_length, ind, option, mode=0):
    """Split each sentence at word position `ind` into a forward prefix and a
    REVERSED backward suffix, both starting with the BOS id (dict_size+2) and
    padded with the EOS filler id (dict_size+1).

    Args:
        input: (B, num_steps) token-id matrix, BOS at column 0.
        sequence_length: (B,) lengths including BOS.
        ind: 0-based word index (excluding BOS) where the cut happens.
        mode: 0 = replacement/deletion (suffix excludes position ind),
              1 = insertion (suffix includes position ind).
    Returns:
        (input_forward, input_backward, sequence_length_forward,
         sequence_length_backward), all int32 arrays.
    """
    batch_size=input.shape[0]
    num_steps=input.shape[1]
    # Start both halves fully padded with the EOS filler id.
    input_forward=np.zeros([batch_size, num_steps])+option.dict_size+1
    input_backward=np.zeros([batch_size, num_steps])+option.dict_size+1
    sequence_length_forward=np.zeros([batch_size])
    sequence_length_backward=np.zeros([batch_size])
    for i in range(batch_size):
        # Both halves begin with the BOS marker.
        input_forward[i][0]=option.dict_size+2
        input_backward[i][0]=option.dict_size+2
        length=sequence_length[i]-1
        # Forward half: the ind words before the cut, in original order.
        for j in range(ind):
            input_forward[i][j+1]=input[i][j+1]
        sequence_length_forward[i]=ind+1
        if mode==0:
            # Backward half: words strictly after position ind, reversed.
            for j in range(length-ind-1):
                input_backward[i][j+1]=input[i][length-j]
            sequence_length_backward[i]=length-ind
        elif mode==1:
            # Insertion keeps the word at position ind in the suffix too.
            for j in range(length-ind):
                input_backward[i][j+1]=input[i][length-j]
            sequence_length_backward[i]=length-ind+1
    return input_forward.astype(np.int32), input_backward.astype(np.int32), sequence_length_forward.astype(np.int32), sequence_length_backward.astype(np.int32)
def generate_candidate_input(input, sequence_length, ind, prob, search_size, option, mode=0):
    """Build `search_size` candidate sentences by editing word position ind+1
    of input[0].

    mode 0 replaces the word, mode 1 inserts a new word (shifting the tail
    right), mode 2 deletes it (shifting the tail left; returns one candidate).

    Args:
        prob: (vocab,) probability vector used to pick the top-search_size
            replacement/insertion tokens.
    Returns:
        (candidates, lengths) as int32 arrays, shape (search_size, L) for
        modes 0/1 and (1, L) for mode 2.
    """
    input_new=np.array([input[0]]*search_size)
    sequence_length_new=np.array([sequence_length[0]]*search_size)
    length=sequence_length[0]-1
    if mode!=2:
        # ids of the search_size most probable in-vocabulary tokens
        ind_token=np.argsort(prob[: option.dict_size])[-search_size:]
    if mode==2:
        # Deletion: shift the tail left, pad freed columns with the EOS id.
        for i in range(sequence_length[0]-ind-2):
            input_new[: , ind+i+1]=input_new[: , ind+i+2]
        for i in range(sequence_length[0]-1, option.num_steps-1):
            input_new[: , i]=input_new[: , i]*0+option.dict_size+1
        sequence_length_new=sequence_length_new-1
        return input_new[:1], sequence_length_new[:1]
    if mode==1:
        # Insertion: shift the tail right to open a slot at ind+1.
        for i in range(0, sequence_length_new[0]-1-ind):
            input_new[: , sequence_length_new[0]-i]=input_new[: , sequence_length_new[0]-1-i]
        sequence_length_new=sequence_length_new+1
    # Fill position ind+1 of each candidate with one of the top tokens.
    for i in range(search_size):
        input_new[i][ind+1]=ind_token[i]
    return input_new.astype(np.int32), sequence_length_new.astype(np.int32)
def generate_candidate_input_batch(input, sequence_length, ind, prob, search_size, option, mode=0,\
        calibrated_set=None):
    """Batched candidate generation: for each of the K sentences, build
    `search_size` edited copies at word position ind+1.

    Args:
        input: (K, L) token-id array.
        sequence_length: (K,) lengths including BOS.
        ind: 0-based word position being edited (excluding BOS).
        prob: (K, vocab) probabilities used to pick replacement tokens.
        search_size: candidates per sentence.
        mode: 0 = replace, 1 = insert, 2 = delete.
        calibrated_set: unused, kept for API parity with the non-batch version.
    Returns:
        (candidates, lengths): (K, search_size, L) int32 array and
        (K, search_size) int32 lengths (mode 2 returns them un-cast, as the
        single-sentence version does).
    """
    input_new = np.array([[inp] * search_size for inp in input])                   # K,search,L
    sequence_length_new = np.array([[l] * search_size for l in sequence_length])   # K,search
    if mode != 2:
        # Fix: the original applied [-search_size:] to axis 0 (the batch
        # axis); the top-search_size token ids per sentence live on axis 1.
        ind_token = np.argsort(prob[:, : option.dict_size], 1)[:, -search_size:]   # K,search
    if mode == 2:
        # Deletion: shift the tail left and pad with the EOS filler id.
        for k in range(len(input)):
            for i in range(sequence_length[k] - ind - 2):
                input_new[k, :, ind + i + 1] = input_new[k, :, ind + i + 2]
            for i in range(sequence_length[k] - 1, option.num_steps - 1):
                input_new[k, :, i] = input_new[k, :, i] * 0 + option.dict_size + 1
        sequence_length_new = sequence_length_new - 1
        return input_new, sequence_length_new
    if mode == 1:
        # Insertion: shift the tail right to open a slot at ind+1.
        # Fix: the original indexed the 3-D array without the sentence axis
        # and used the (K, search) length array where a scalar is required,
        # which raised; use the per-sentence scalar lengths instead.
        for k in range(len(input)):
            for i in range(0, sequence_length[k] - 1 - ind):
                input_new[k, :, sequence_length[k] - i] = input_new[k, :, sequence_length[k] - 1 - i]
        sequence_length_new = sequence_length_new + 1
    # Replace (mode 0) or fill the inserted slot (mode 1) with the top tokens.
    for i in range(search_size):
        input_new[:, i, ind + 1] = ind_token[:, i]
    return input_new.astype(np.int32), sequence_length_new.astype(np.int32)
def generate_candidate_input_calibrated(input, sequence_length, ind, prob, searching_size, option,\
        mode=0, calibrated_set = None):
    """Like generate_candidate_input, but when calibrated_set is given the
    candidate pool is enlarged and the current sentence's own tokens are
    appended to the replacement list.

    NOTE(review): the calibrated branch concatenates np.array(input[0]) —
    i.e. the sentence's own token ids — not the calibrated_set contents;
    confirm whether the calibrated_set tokens were intended here.

    Returns:
        (candidates, lengths) as int32 arrays; (1, L) slice for mode 2.
    """
    search_size = searching_size
    if mode!=2:
        if calibrated_set is None:
            ind_token=np.argsort(prob[: option.dict_size])[-search_size:]
        else:
            # Enlarge the pool by the calibrated set's size.
            search_size = searching_size+len(calibrated_set)
            ind_token=np.argsort(prob[: option.dict_size])[-search_size:]
            ind_token = np.concatenate([ind_token,np.array(input[0])],0)
    input_new=np.array([input[0]]*search_size)
    sequence_length_new=np.array([sequence_length[0]]*search_size)
    length=sequence_length[0]-1
    if mode==2:
        # NOTE(review): debug prints left in place.
        print(input_new, ind)
        # Deletion: shift left and pad with the EOS filler id.
        for i in range(sequence_length[0]-ind-2):
            input_new[: , ind+i+1]=input_new[: , ind+i+2]
        for i in range(sequence_length[0]-1, option.num_steps-1):
            input_new[: , i]=input_new[: , i]*0+option.dict_size+1
        print(input_new, ind)
        sequence_length_new=sequence_length_new-1
        return input_new[:1], sequence_length_new[:1]
    if mode==1:
        # Insertion: shift the tail right to open a slot at ind+1.
        for i in range(0, sequence_length_new[0]-1-ind):
            input_new[: , sequence_length_new[0]-i]=input_new[: , sequence_length_new[0]-1-i]
        sequence_length_new=sequence_length_new+1
    for i in range(search_size):
        input_new[i][ind+1]=ind_token[i]
    return input_new.astype(np.int32), sequence_length_new.astype(np.int32)
def normalize(x, e=0.05):
    """Scale `x` so its entries sum to 1 (`e` is unused; kept for API
    compatibility with callers)."""
    scaled = copy(x)
    return scaled / scaled.sum()
def sample_from_candidate(prob_candidate):
    """Draw a candidate index proportionally to its (unnormalised) score."""
    distribution = normalize(prob_candidate)
    return choose_action(distribution)
def samplep(probs):
    """Sample one column index per row of an (M, N) probability matrix."""
    row_count = probs.shape[0]
    column_count = probs.shape[1]
    choices = [
        np.random.choice(range(column_count), 1, replace=True, p=probs[row])[0]
        for row in range(row_count)
    ]
    return np.array(choices)
def just_acc(option):
    """Return 0 (force-accept the proposal) with probability
    option.just_acc_rate, otherwise 1."""
    return 0 if np.random.random() < option.just_acc_rate else 1
def getp(probabilities,input, lengths, option):
    """Per-sentence probability: product of each step's probability of the
    observed next token, times the end-token probability (dict_size + 1) at
    the final step."""
    sentence_probs = []
    for probs, tokens, length in zip(probabilities, input, lengths):
        p = 1
        for step in range(length - 1):
            p *= probs[step][tokens[step + 1]]
        # close the sentence with the EOS id
        p *= probs[length - 1][option.dict_size + 1]
        sentence_probs.append(p)
    return sentence_probs
class StrToBytes:
    """Adapter exposing a text-mode file object through a bytes read
    interface (e.g. to feed a str-mode file to pickle.load)."""

    def __init__(self, fileobj):
        self.fileobj = fileobj

    def read(self, size):
        data = self.fileobj.read(size)
        return data.encode()

    def readline(self, size=-1):
        line = self.fileobj.readline(size)
        return line.encode()
def data_type():
    # Single switch point for the TF float precision used by the models.
    return tf.float32
class PTBModel(object):
    #The language model.
    """LSTM language model (TensorFlow 1.x graph mode) used as the
    forward/backward sentence scorer for Metropolis-Hastings editing.

    Feed dict keys: _input (B, num_steps ids), _target (same shape),
    _sequence_length (B,). Exposes _output_prob (per-step softmax),
    _cost (sequence loss) and _train_op (Adam step).
    """

    def __init__(self, is_training, option,is_test_LM=False):
        # is_test_LM is accepted for API compatibility but unused here.
        self._is_training = is_training
        self.batch_size = option.batch_size
        self.num_steps = option.num_steps
        size = option.hidden_size
        self.hidden_size = option.hidden_size
        self.num_layers = option.num_layers
        self.keep_prob = option.keep_prob
        vocab_size = option.vocab_size
        self._input=tf.placeholder(shape=[None, option.num_steps], dtype=tf.int32)
        self._target=tf.placeholder(shape=[None, option.num_steps], dtype=tf.int32)
        self._sequence_length=tf.placeholder(shape=[None], dtype=tf.int32)
        # Embedding lookup is pinned to the CPU (standard for large tables).
        with tf.device("/cpu:0"):
            embedding = tf.get_variable("embedding", [vocab_size, size], dtype=data_type())
            inputs = tf.nn.embedding_lookup(embedding, self._input)
        softmax_w = tf.get_variable(
            "softmax_w", [size, vocab_size], dtype=data_type())
        softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
        if is_training and option.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, option.keep_prob)
        output = self._build_rnn_graph(inputs, self._sequence_length, is_training)
        output=tf.reshape(output, [-1, option.hidden_size])
        logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
        # Reshape logits to be a 3-D tensor for sequence loss
        logits = tf.reshape(logits, [-1, self.num_steps, vocab_size])
        self._output_prob=tf.nn.softmax(logits)
        # Use the contrib sequence loss and average over the batches
        # Mask pads beyond each sequence length out of the loss.
        mask=tf.sequence_mask(lengths=self._sequence_length, maxlen=self.num_steps, dtype=data_type())
        loss = tf.contrib.seq2seq.sequence_loss(
            logits,
            self._target,
            mask,
            average_across_timesteps=True,
            average_across_batch=True)
        # Update the cost
        self._cost = loss
        #self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        # Clip gradients globally before the Adam update.
        grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
                                          option.max_grad_norm)
        optimizer = tf.train.AdamOptimizer()
        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.train.get_or_create_global_step())

    def _build_rnn_graph(self, inputs, sequence_length, is_training):
        # Indirection point in case other cell types are added later.
        return self._build_rnn_graph_lstm(inputs, sequence_length, is_training)

    def _get_lstm_cell(self, is_training):
        # reuse=not is_training lets the test graph share train variables.
        return tf.contrib.rnn.BasicLSTMCell(
            self.hidden_size, forget_bias=0.0, state_is_tuple=True,
            reuse=not is_training)

    def _build_rnn_graph_lstm(self, inputs, sequence_length, is_training):
        """Build the inference graph using canonical LSTM cells."""
        # Slightly better results can be obtained with forget gate biases
        # initialized to 1 but the hyperparameters of the model would need to be
        # different than reported in the paper.
        def make_cell():
            cell = self._get_lstm_cell( is_training)
            if is_training and self.keep_prob < 1:
                cell = tf.contrib.rnn.DropoutWrapper(
                    cell, output_keep_prob=self.keep_prob)
            return cell
        cell = tf.contrib.rnn.MultiRNNCell(
            [make_cell() for _ in range(self.num_layers)], state_is_tuple=True)
        outputs, states=tf.nn.dynamic_rnn(cell=cell, inputs=inputs, sequence_length=sequence_length, dtype=data_type())
        return outputs
def run_epoch(sess, model, input, sequence_length, target=None, mode='train'):
    #Runs the model on the given data.
    """Run one step of `model` in session `sess`.

    mode 'train': one optimisation step; returns the loss.
    mode 'test':  loss only, no gradient update (target required for both).
    any other value (callers pass 'use'): returns the per-step softmax
    distribution for the given inputs; target is ignored.
    """
    if mode=='train':
        #train language model
        _,cost = sess.run([model._train_op, model._cost], feed_dict={model._input: input, model._target:target, model._sequence_length:sequence_length})
        return cost
    elif mode=='test':
        #test language model
        cost = sess.run(model._cost, feed_dict={model._input: input, model._target:target, model._sequence_length:sequence_length})
        return cost
    else:
        #use the language model to calculate sentence probability
        output_prob = sess.run(model._output_prob, feed_dict={model._input: input, model._sequence_length:sequence_length})
        return output_prob
def metropolisHasting(option, dataclass,forwardmodel, backwardmodel):
    """Paraphrase each input sentence by Metropolis-Hastings sampling.

    Repeatedly proposes local edits (action 0: word replacement, 1: word
    insertion, 2: word deletion) scored by forward/backward language models
    times an optional keyword-similarity term, and accepts each proposal
    with the Metropolis-Hastings acceptance ratio.

    Args:
        option: configuration namespace (checkpoint paths, num_steps,
            dict_size, action_prob, search_size, sample_time, threshold,
            sim, emb_path, rare_since, ...).
        dataclass: supplies sen2id/id2sen vocabulary mappings.
        forwardmodel, backwardmodel: fallback (non-TF) language models, used
            only when ``tfflag`` is False.

    Returns:
        list: one generated sentence (word list from id2sen) per input.
    """
    tfflag = True
    if tfflag:
        # Build forward and backward PTB language models.  The *_train
        # graphs exist only so the shared variables are created; the *_test
        # graphs (reuse=True) are the ones actually evaluated.
        with tf.name_scope("forward_train"):
            with tf.variable_scope("forward", reuse=None):
                m_forward = PTBModel(is_training=True,option=option)
        with tf.name_scope("forward_test"):
            with tf.variable_scope("forward", reuse=True):
                mtest_forward = PTBModel(is_training=False,option=option)
        var=tf.trainable_variables()
        var_forward=[x for x in var if x.name.startswith('forward')]
        saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
        with tf.name_scope("backward_train"):
            with tf.variable_scope("backward", reuse=None):
                m_backward = PTBModel(is_training=True,option=option)
        with tf.name_scope("backward_test"):
            with tf.variable_scope("backward", reuse=True):
                mtest_backward = PTBModel(is_training=False, option=option)
        var=tf.trainable_variables()
        var_backward=[x for x in var if x.name.startswith('backward')]
        saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
        init = tf.global_variables_initializer()
        session = tf.Session()
        session.run(init)
        saver_forward.restore(session, option.forward_save_path)
        saver_backward.restore(session, option.backward_save_path)
    similaritymodel = BertSimilarity()
    similarity = similarity_keyword #similarity_semantic
    # Embeddings were pickled under Python 2; decode bytes via latin1.
    fileobj = open(option.emb_path,'r')
    emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
    fileobj.close()
    sim=option.sim
    sta_vec=list(np.zeros([option.num_steps-1]))
    use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
    id2sen = dataclass.id2sen
    generateset = []
    for sen_id in range(use_data.length):
        #generate for each sentence
        sta_vec=sta_vec_list[sen_id%len(sta_vec)]
        input, sequence_length, _=use_data(1, sen_id)
        input_original=input[0]
        # Mark rare in-vocabulary words as keywords that should be kept.
        for i in range(1,option.num_steps):
            if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
                sta_vec[i-1]=1
        pos=0
        print(' '.join(id2sen(input[0])))
        print(sta_vec)
        for iter in range(option.sample_time):
            #ind is the index of the selected word, regardless of the beginning token.
            ind=pos%(sequence_length[0]-1)
            action=choose_action(option.action_prob)
            if action==0: # word replacement (action: 0)
                # Probability of the current sentence under the forward LM.
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                            mode='use')[0]
                else:
                    prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                # j deliberately leaks from the loop: this multiplies in the
                # end-of-sentence probability at the position after the last word.
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
                            option, similaritymodel)[0]
                    prob_old_prob*=similarity_old
                else:
                    similarity_old=-1
                # Propose replacements from the product of forward and backward
                # LM word distributions at the edit position.
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                            sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                for i in range(option.search_size):
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                # Accept only if the sampled token is a real dictionary word and
                # passes the threshold test (or acceptance is forced by just_acc).
                if input_candidate[prob_candidate_ind][ind+1]<option.dict_size and\
                        (prob_candidate_prob>prob_old_prob*option.threshold or just_acc(option)==0):
                    input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    if np.sum(input1[0])==np.sum(input[0]):
                        pass
                    else:
                        input= input1
                        print(' '.join(id2sen(input[0])))
            elif action==1: # word insert
                # Cannot insert beyond the fixed unrolled length.
                if sequence_length[0]>=option.num_steps:
                    pos += 1
                    break
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                            sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                for i in range(option.search_size):
                    tem=1
                    for j in range(sequence_length_candidate[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input,\
                            sequence_length,mode='use')[0]
                else:
                    prob_old = output_p(input, forwardmodel) # 100,15,300003
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                #alpha is acceptance ratio of current proposal
                alpha=min(1, prob_candidate_prob*option.action_prob[2]/(prob_old_prob*option.action_prob[1]*prob_candidate_norm[prob_candidate_ind]))
                if choose_action([alpha, 1-alpha])==0 and \
                        input_candidate[prob_candidate_ind][ind]<option.dict_size and \
                        (prob_candidate_prob>prob_old_prob* option.threshold or just_acc(option)==0):
                    input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    sequence_length+=1
                    pos+=1
                    # Keep the keyword vector aligned with the inserted token.
                    sta_vec.insert(ind, 0.0)
                    del(sta_vec[-1])
                    print(' '.join(id2sen(input[0])))
            elif action==2: # word delete
                # Never delete down to an empty sentence.
                if sequence_length[0]<=2:
                    pos += 1
                    break
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                            mode='use')[0]
                else:
                    prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                input_candidate, sequence_length_candidate=generate_candidate_input(input,\
                        sequence_length, ind, None, option.search_size, option, mode=action)
                # delete sentence
                if tfflag:
                    prob_new=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')[0]
                else:
                    prob_new = output_p(input_candidate, forwardmodel)
                tem=1
                for j in range(sequence_length_candidate[0]-1):
                    tem*=prob_new[j][input_candidate[0][j+1]]
                tem*=prob_new[j+1][option.dict_size+1]
                prob_new_prob=tem
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_new_prob=prob_new_prob*similarity_candidate
                # original sentence
                # Reverse proposal (mode=0, i.e. replacement): probability of
                # re-creating the current word, needed for the MH ratio.
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=0)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                            sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=0)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                for i in range(option.search_size):
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                #alpha is acceptance ratio of current proposal
                # Find the candidate equal to the current sentence; its
                # normalized probability is the reverse-proposal probability.
                # NOTE(review): `in` on numpy arrays is elementwise membership,
                # not whole-row equality -- presumably relied upon as-is; verify.
                if input[0] in input_candidate:
                    for candidate_ind in range(len(input_candidate)):
                        if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
                            break
                        pass
                    alpha=min(prob_candidate_norm[candidate_ind]*prob_new_prob*option.action_prob[1]/(option.action_prob[2]*prob_old_prob), 1)
                else:
                    alpha=0
                if choose_action([alpha, 1-alpha])==0 and (prob_new_prob> prob_old_prob*option.threshold or just_acc(option)==0):
                    # Splice out token ind+1 and pad with an EOS id at the end.
                    input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
                    sequence_length-=1
                    del(sta_vec[ind])
                    sta_vec.append(0)
                    pos -= 1
                    print(' '.join(id2sen(input[0])))
            pos += 1
        generateset.append(id2sen(input[0]))
    return generateset
def simulatedAnnealing_bat(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
    """Batched simulated-annealing paraphrase sampling.

    Processes sentences in mini-batches and anneals the acceptance
    temperature linearly over ``option.sample_time`` iterations.

    NOTE: ``action`` is hard-coded to 0 below, so only the batched
    word-replacement move is ever executed; the insert/delete branches are
    dead code kept from the single-sentence version (they index
    ``sequence_length[0]`` and would need batching before being re-enabled).

    Args:
        option: configuration namespace (paths, num_steps, dict_size,
            action_prob, search_size, sample_time, sim, emb_path, ...).
        dataclass: supplies sen2id/id2sen vocabulary mappings.
        forwardmodel, backwardmodel: fallback (non-TF) language models, only
            referenced in the dead tfflag=False paths.
        sim_mode: selects the similarity scorer for candidate sentences.

    Returns:
        list: one generated sentence (word list from id2sen) per batch
        processed (the first sentence of each batch is recorded).
    """
    tfflag = True
    print('xxxxxxxxxx')
    if tfflag:
        # Build forward and backward PTB language models; only the *_test
        # graphs (reuse=True) are evaluated.
        with tf.name_scope("forward_train"):
            with tf.variable_scope("forward", reuse=None):
                m_forward = PTBModel(is_training=True,option=option)
        print('xxxxxxxxxx')
        with tf.name_scope("forward_test"):
            with tf.variable_scope("forward", reuse=True):
                mtest_forward = PTBModel(is_training=False,option=option)
        var=tf.trainable_variables()
        var_forward=[x for x in var if x.name.startswith('forward')]
        saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
        print('xxxxxxxxxx')
        with tf.name_scope("backward_train"):
            with tf.variable_scope("backward", reuse=None):
                m_backward = PTBModel(is_training=True,option=option)
        with tf.name_scope("backward_test"):
            with tf.variable_scope("backward", reuse=True):
                mtest_backward = PTBModel(is_training=False, option=option)
        var=tf.trainable_variables()
        var_backward=[x for x in var if x.name.startswith('backward')]
        saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
        print('xxxxxxxxxx')
        init = tf.global_variables_initializer()
        session = tf.Session()
        # FIX: was `session.run()` -- tf.Session.run requires the fetches
        # argument, so the bare call raised a TypeError and nothing was
        # initialized.  Run the initializer op, as the sibling functions do.
        session.run(init)
        # NOTE(review): checkpoint restoring is commented out, so the models
        # run with freshly initialized weights -- confirm this is intended.
        # saver_forward.restore(session, option.forward_save_path)
        # saver_backward.restore(session, option.backward_save_path)
    print('xxxxxxxxxx')
    generate_candidate = generate_candidate_input_batch
    # Select the similarity scorer for candidate sentences.
    similaritymodel = None
    if sim_mode == 'keyword':
        similarity = similarity_keyword_batch
    elif sim_mode =='keyword-bleu':
        similarity = similarity_keyword_bleu
    elif sim_mode =='keyword-bert':
        similaritymodel = BertEncoding()
        similarity = similarity_keyword_bert
    elif sim_mode =='keyword-bert-bleu':
        similaritymodel = BertEncoding()
        similarity = similarity_keyword_bert_bleu
    elif sim_mode =='semantic':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic
    elif sim_mode =='semantic-bleu':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic_bleu
    elif sim_mode =='semantic-keyword':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic_keyword
    # Embeddings were pickled under Python 2; decode bytes via latin1.
    fileobj = open(option.emb_path,'r')
    emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
    fileobj.close()
    sim=option.sim
    sta_vec=list(np.zeros([option.num_steps-1]))
    use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
    id2sen = dataclass.id2sen
    generateset = []
    # Linearly decreasing temperature schedule.
    C = 0.05
    batch_size = 20
    temperatures = C*(1.0/100)*np.array(list(range(option.sample_time+1,1,-1)))
    print(temperatures, use_data.length)
    print(use_data.length/batch_size)
    for sen_id in range(int(use_data.length/batch_size)):
        sta_vec=sta_vec_list[sen_id*batch_size:sen_id*batch_size+batch_size]
        input, sequence_length, _=use_data(batch_size, sen_id)
        input_original=input
        N_input = len(input)
        sta_vec_original = [x for x in sta_vec]
        pos=0
        for sta, sent in zip( sta_vec, input):
            print(' '.join(id2sen(sent)))
            print(sta)
        calibrated_set = [x for x in input[0]]
        for iter in range(option.sample_time):
            temperature = temperatures[iter]
            ind=pos%(np.max(sequence_length))
            action=choose_action(option.action_prob)
            action = 0  # force word replacement; insert/delete branches below are disabled
            calibrated_set = list(set(calibrated_set))
            if action==0: # word replacement (action: 0), batched over K sentences
                prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                        mode='use') # K,L,Vocab
                prob_old_prob = getp(prob_old,input, sequence_length, option) # K,
                input_ = [[x] for x in input]
                similarity_old=similarity(input_, input_original, sta_vec, id2sen, emb_word,
                        option, similaritymodel) #K,
                V_old = prob_old_prob*np.concatenate(similarity_old,0)
                # Candidate proposal from forward*backward LM distributions.
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                prob_forward=run_epoch(session, mtest_forward, input_forward,\
                        sequence_length_forward, mode='use')[:, ind%(sequence_length[0]-1),:]
                prob_backward=run_epoch(session, mtest_backward, input_backward,\
                        sequence_length_backward, mode='use')[:, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward) #K,vocab
                input_candidate, sequence_length_candidate=generate_candidate(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
                        calibrated_set=calibrated_set) # K,100,15
                input_candidate_flat = input_candidate.reshape(-1,option.num_steps)
                sequence_length_candidate_flat = sequence_length_candidate.reshape(-1)
                prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate_flat,\
                        sequence_length_candidate_flat, mode='use') #K*100,15,vocab
                prob_candidate = getp(prob_candidate_pre,
                        input_candidate_flat,sequence_length_candidate_flat, option) # K*100
                prob_candidate = np.array(prob_candidate).reshape(N_input,-1) # K,100
                similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                        id2sen, emb_word, option, similaritymodel) # K,100
                similarity_candidate = np.concatenate(similarity_candidate,0).reshape(N_input,-1)
                prob_candidate=prob_candidate*similarity_candidate # K,100
                prob_candidate_norm= prob_candidate/prob_candidate.sum(1,keepdims=True)
                prob_candidate_ind=samplep(prob_candidate_norm)
                # Gather each row's sampled candidate probability.
                prob_candidate_prob= torch.gather(torch.tensor(prob_candidate),1,\
                        torch.tensor(prob_candidate_ind,dtype=torch.long).view(N_input,1)) # 5,1
                prob_candidate_prob = prob_candidate_prob.squeeze().numpy()
                # Length-normalized log-scores and annealed acceptance ratio.
                V_new = np.log(np.maximum(np.power(prob_candidate_prob,1.0/sequence_length),1e-200))
                V_old = np.log(np.maximum(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                alphat = np.minimum(1,np.exp(np.minimum((V_new-V_old)/temperature,100)))
                for i,inp in enumerate(input):
                    alpha = alphat[i]
                    chooseind = prob_candidate_ind[i]
                    if choose_action([alpha, 1-alpha])==0:
                        input1=input_candidate[i][chooseind]
                        if np.sum(input1)==np.sum(inp):
                            pass
                        else:
                            input[i] = input1
                            # calibrated_set.append(input[i][ind])
                            print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[i])))
            elif action==1: # word insert -- DEAD CODE: action is forced to 0 above
                if sequence_length[0]>=option.num_steps:
                    pos += 1
                    continue
                    # break
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                            sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
                        calibrated_set=calibrated_set)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                #for i in range(option.search_size):
                for i in range(len(input_candidate)):
                    tem=1
                    for j in range(sequence_length_candidate[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                similarity_new = similarity_candidate[prob_candidate_ind]
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input,\
                            sequence_length,mode='use')[0]
                else:
                    prob_old = output_p(input, forwardmodel) # 100,15,300003
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length_candidate[0]),1e-200))
                V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
                if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
                    input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    sequence_length+=1
                    pos+=1
                    # sta_vec.insert(ind, 0.0)
                    # del(sta_vec[-1])
                    print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
                            V_new,alphat,similarity_old,similarity_new)
                    print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            elif action==2: # word delete -- DEAD CODE: action is forced to 0 above
                if sequence_length[0]<=2 or ind==0:
                    pos += 1
                    continue
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                            mode='use')[0]
                else:
                    prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, None, option.search_size, option,\
                        mode=action,calibrated_set=calibrated_set)
                # delete sentence
                if tfflag:
                    prob_new=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')[0]
                else:
                    prob_new = output_p(input_candidate, forwardmodel)
                tem=1
                for j in range(sequence_length_candidate[0]-1):
                    tem*=prob_new[j][input_candidate[0][j+1]]
                tem*=prob_new[j+1][option.dict_size+1]
                prob_new_prob=tem
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_new_prob=prob_new_prob*similarity_candidate
                #alpha is acceptance ratio of current proposal
                if input[0] in input_candidate:
                    for candidate_ind in range(len(input_candidate)):
                        if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
                            break
                        pass
                    V_new = math.log(max(np.power(prob_new_prob,1.0/sequence_length_candidate[0]),1e-200))
                    V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                    alphat = min(1,math.exp((V_new-V_old)/temperature))
                else:
                    alphat=0
                if choose_action([alphat, 1-alphat])==0:
                    calibrated_set.append(input[0][ind])
                    input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
                    sequence_length-=1
                    # del(sta_vec[ind])
                    # sta_vec.append(0)
                    pos -= 1
                    print('oldprob,vold, vnew, alpha,simold, simnew',prob_old_prob,V_old,\
                            V_new,alphat,similarity_old,similarity_candidate)
                    print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            pos += 1
        generateset.append(id2sen(input[0]))
        appendtext(id2sen(input[0]), option.save_path)
    return generateset
def simulatedAnnealing(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
    """Single-sentence simulated-annealing paraphrase sampling.

    Like metropolisHasting, but proposals are accepted with an annealed
    acceptance ratio exp((V_new - V_old) / temperature) on length-normalized
    log-scores, and candidates are drawn with calibrated generation (words
    previously edited out are kept as extra candidates).

    Args:
        option: configuration namespace (checkpoint paths, num_steps,
            dict_size, action_prob, search_size, sample_time, sim, ...).
        dataclass: supplies sen2id/id2sen vocabulary mappings.
        forwardmodel, backwardmodel: fallback (non-TF) language models, used
            only when ``tfflag`` is False.
        sim_mode: selects the similarity scorer for candidate sentences.

    Returns:
        list: one generated sentence (word list from id2sen) per input.
    """
    tfflag = True
    # Build forward and backward PTB language models; only the *_test graphs
    # (reuse=True) are evaluated.
    with tf.name_scope("forward_train"):
        with tf.variable_scope("forward", reuse=None):
            m_forward = PTBModel(is_training=True, option=option)
    with tf.name_scope("forward_test"):
        with tf.variable_scope("forward", reuse=True):
            mtest_forward = PTBModel(is_training=False, option=option)
    var=tf.trainable_variables()
    var_forward=[x for x in var if x.name.startswith('forward')]
    saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
    with tf.name_scope("backward_train"):
        with tf.variable_scope("backward", reuse=None):
            m_backward = PTBModel(is_training=True, option=option)
    with tf.name_scope("backward_test"):
        with tf.variable_scope("backward", reuse=True):
            mtest_backward = PTBModel(is_training=False,option=option)
    var=tf.trainable_variables()
    var_backward=[x for x in var if x.name.startswith('backward')]
    saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
    print('line1295-------------------')
    init = tf.global_variables_initializer()
    # Everything below runs inside the session context: run_epoch needs a
    # live session for the whole sampling loop.
    with tf.Session() as session:
        session.run(init)
        saver_forward.restore(session, option.forward_save_path)
        saver_backward.restore(session, option.backward_save_path)
        print('line1295-------------------')
        # (A commented-out duplicate of the model/session setup above was
        # removed here.)
        # Select the similarity scorer for candidate sentences.
        similaritymodel = None
        if sim_mode == 'keyword':
            similarity = similarity_keyword
        elif sim_mode =='keyword-bleu':
            similarity = similarity_keyword_bleu
        elif sim_mode =='keyword-bert':
            similaritymodel = BertEncoding()
            similarity = similarity_keyword_bert
        elif sim_mode =='keyword-bert-bleu':
            similaritymodel = BertEncoding()
            similarity = similarity_keyword_bert_bleu
        elif sim_mode =='semantic':
            similaritymodel = BertSimilarity()
            similarity = similarity_semantic
        elif sim_mode =='semantic-bleu':
            similaritymodel = BertSimilarity()
            similarity = similarity_semantic_bleu
        elif sim_mode =='semantic-keyword':
            similaritymodel = BertSimilarity()
            similarity = similarity_semantic_keyword
        # Embeddings were pickled under Python 2; decode bytes via latin1.
        fileobj = open(option.emb_path,'r')
        emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
        fileobj.close()
        sim=option.sim
        sta_vec=list(np.zeros([option.num_steps-1]))
        use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
        id2sen = dataclass.id2sen
        generateset = []
        # Linearly decreasing temperature schedule.
        C = 0.05
        temperatures = C*(1.0/100)*np.array(list(range(option.sample_time+1,1,-1)))
        print(temperatures)
        for sen_id in range(use_data.length):
            sta_vec=sta_vec_list[sen_id]
            input, sequence_length, _=use_data(1, sen_id)
            input_original=input[0]
            sta_vec_original = [x for x in sta_vec]
            # for i in range(1,option.num_steps):
            #     if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
            #         sta_vec[i-1]=1
            pos=0
            print(' '.join(id2sen(input[0])))
            print(sta_vec)
            # Pool of words edited out so far; re-offered as candidates.
            calibrated_set = [x for x in input[0]]
            for iter in range(option.sample_time):
                temperature = temperatures[iter]
                ind=pos%(sequence_length[0]-1)
                action=choose_action(option.action_prob)
                calibrated_set = list(set(calibrated_set))
                if action==0: # word replacement (action: 0)
                    # Score the current sentence under the forward LM.
                    if tfflag:
                        prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                                mode='use')[0]
                    else:
                        prob_old= output_p(input, forwardmodel) #15,K
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_old[j][input[0][j+1]]
                    # j deliberately leaks from the loop: multiplies in the EOS
                    # probability after the last word.
                    tem*=prob_old[j+1][option.dict_size+1]
                    prob_old_prob=tem
                    if sim!=None:
                        similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
                                option, similaritymodel)[0]
                        prob_old_prob*=similarity_old
                    else:
                        similarity_old=-1
                    # Propose replacements from forward*backward distributions.
                    input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                            cut_from_point(input, sequence_length, ind, option, mode=action)
                    if tfflag:
                        prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                        prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                    else:
                        prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                        prob_backward = output_p(input_backward,backwardmodel)[
                                sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                    prob_mul=(prob_forward*prob_backward)
                    input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                            sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
                            calibrated_set=calibrated_set)
                    if tfflag:
                        prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                                sequence_length_candidate,mode='use')
                    else:
                        prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                    prob_candidate=[]
                    for i in range(len(input_candidate)):
                        tem=1
                        for j in range(sequence_length[0]-1):
                            tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                        tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                        prob_candidate.append(tem)
                    prob_candidate=np.array(prob_candidate)
                    if sim!=None:
                        similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                                id2sen, emb_word, option, similaritymodel)
                        prob_candidate=prob_candidate*similarity_candidate
                    prob_candidate_norm=normalize(prob_candidate)
                    prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                    prob_candidate_prob=prob_candidate[prob_candidate_ind]
                    # Annealed acceptance on length-normalized log-scores.
                    V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length),1e-200))
                    V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                    alphat = min(1,math.exp(min((V_new-V_old)/temperature,100)))
                    if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
                        input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                        if np.sum(input1[0])==np.sum(input[0]):
                            pass
                        else:
                            # Remember the replaced word for later proposals.
                            calibrated_set.append(input[0][ind])
                            input= input1
                            print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
                                    V_new,alphat,similarity_old,similarity_candidate[prob_candidate_ind])
                            print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
                elif action==1: # word insert
                    # Cannot insert beyond the fixed unrolled length.
                    if sequence_length[0]>=option.num_steps:
                        pos += 1
                        continue
                        # break
                    input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                            cut_from_point(input, sequence_length, ind, option, mode=action)
                    if tfflag:
                        prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                        prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                    else:
                        prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                        prob_backward = output_p(input_backward,backwardmodel)[
                                sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                    prob_mul=(prob_forward*prob_backward)
                    input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                            sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
                            calibrated_set=calibrated_set)
                    if tfflag:
                        prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                                sequence_length_candidate,mode='use')
                    else:
                        prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                    prob_candidate=[]
                    #for i in range(option.search_size):
                    for i in range(len(input_candidate)):
                        tem=1
                        for j in range(sequence_length_candidate[0]-1):
                            tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                        tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                        prob_candidate.append(tem)
                    prob_candidate=np.array(prob_candidate)
                    if sim!=None:
                        similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                                id2sen, emb_word, option, similaritymodel)
                        prob_candidate=prob_candidate*similarity_candidate
                    prob_candidate_norm=normalize(prob_candidate)
                    prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                    prob_candidate_prob=prob_candidate[prob_candidate_ind]
                    similarity_new = similarity_candidate[prob_candidate_ind]
                    if tfflag:
                        prob_old=run_epoch(session, mtest_forward, input,\
                                sequence_length,mode='use')[0]
                    else:
                        prob_old = output_p(input, forwardmodel) # 100,15,300003
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_old[j][input[0][j+1]]
                    tem*=prob_old[j+1][option.dict_size+1]
                    prob_old_prob=tem
                    if sim!=None:
                        similarity_old=similarity(input, input_original,sta_vec,\
                                id2sen, emb_word, option, similaritymodel)[0]
                        prob_old_prob=prob_old_prob*similarity_old
                    else:
                        similarity_old=-1
                    V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length_candidate[0]),1e-200))
                    V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                    alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
                    if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
                        input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                        sequence_length+=1
                        pos+=1
                        # sta_vec.insert(ind, 0.0)
                        # del(sta_vec[-1])
                        print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
                                V_new,alphat,similarity_old,similarity_new)
                        print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
                elif action==2: # word delete
                    # Never delete to an empty sentence or at position 0.
                    if sequence_length[0]<=2 or ind==0:
                        pos += 1
                        continue
                    if tfflag:
                        prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                                mode='use')[0]
                    else:
                        prob_old= output_p(input, forwardmodel) #15,K
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_old[j][input[0][j+1]]
                    tem*=prob_old[j+1][option.dict_size+1]
                    prob_old_prob=tem
                    if sim!=None:
                        similarity_old=similarity(input, input_original,sta_vec,\
                                id2sen, emb_word, option, similaritymodel)[0]
                        prob_old_prob=prob_old_prob*similarity_old
                    else:
                        similarity_old=-1
                    input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                            sequence_length, ind, None, option.search_size, option,\
                            mode=action,calibrated_set=calibrated_set)
                    # delete sentence
                    if tfflag:
                        prob_new=run_epoch(session, mtest_forward, input_candidate,\
                                sequence_length_candidate,mode='use')[0]
                    else:
                        prob_new = output_p(input_candidate, forwardmodel)
                    tem=1
                    for j in range(sequence_length_candidate[0]-1):
                        tem*=prob_new[j][input_candidate[0][j+1]]
                    tem*=prob_new[j+1][option.dict_size+1]
                    prob_new_prob=tem
                    if sim!=None:
                        similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                                id2sen, emb_word, option, similaritymodel)[0]
                        prob_new_prob=prob_new_prob*similarity_candidate
                    #alpha is acceptance ratio of current proposal
                    # NOTE(review): `in` on numpy arrays is elementwise
                    # membership, not whole-row equality -- verify intent.
                    if input[0] in input_candidate:
                        for candidate_ind in range(len(input_candidate)):
                            if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
                                break
                            pass
                        V_new = math.log(max(np.power(prob_new_prob,1.0/sequence_length_candidate[0]),1e-200))
                        V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                        alphat = min(1,math.exp((V_new-V_old)/temperature))
                    else:
                        alphat=0
                    if choose_action([alphat, 1-alphat])==0:
                        # Remember the deleted word for later proposals.
                        calibrated_set.append(input[0][ind])
                        input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
                        sequence_length-=1
                        # del(sta_vec[ind])
                        # sta_vec.append(0)
                        pos -= 1
                        print('oldprob,vold, vnew, alpha,simold, simnew',prob_old_prob,V_old,\
                                V_new,alphat,similarity_old,similarity_candidate)
                        print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
                pos += 1
            generateset.append(id2sen(input[0]))
            appendtext(id2sen(input[0]), option.save_path)
        return generateset
def simulatedAnnealing_std(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
    """Unsupervised paraphrase generation by simulated annealing.

    For each input sentence, performs ``option.sample_time`` local edit
    steps.  Each step picks an edit position (``ind``, cycling through the
    sentence via ``pos``) and samples an action: 0 = replace a word,
    1 = insert a word, 2 = delete a word.  Proposals are scored by a
    forward language model (a backward LM is additionally used to rank
    replacement/insertion candidates), multiplied by an optional
    similarity-to-original term, and accepted with the Metropolis
    probability min(1, exp((V_new - V_old) / temperature)) under a
    linearly decreasing temperature schedule.

    Args:
        option: config object (num_steps, dict_size, sample_time,
            search_size, action_prob, sim, rare_since, emb_path,
            forward_save_path, backward_save_path, save_path, ...).
        dataclass: data wrapper exposing ``sen2id`` and ``id2sen``.
        forwardmodel, backwardmodel: PyTorch LMs, only consulted when
            ``tfflag`` is False; ``tfflag`` is hard-coded True here, so the
            TF models built below are what actually score sentences.
        sim_mode: selects the similarity function used to keep proposals
            close to the original sentence.

    Returns:
        List of generated sentences (each a list of word strings); each is
        also appended to ``option.save_path`` via ``appendtext``.
    """
    tfflag = True
    if tfflag:
        # Build TF-1.x forward/backward LM graphs.  The *_train models are
        # created only so the shared variables exist; inference goes
        # through mtest_forward / mtest_backward (reuse=True).
        with tf.name_scope("forward_train"):
            with tf.variable_scope("forward", reuse=None):
                m_forward = PTBModel(is_training=True,option=option)
        with tf.name_scope("forward_test"):
            with tf.variable_scope("forward", reuse=True):
                mtest_forward = PTBModel(is_training=False,option=option)
        var=tf.trainable_variables()
        var_forward=[x for x in var if x.name.startswith('forward')]
        saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
        with tf.name_scope("backward_train"):
            with tf.variable_scope("backward", reuse=None):
                m_backward = PTBModel(is_training=True,option=option)
        with tf.name_scope("backward_test"):
            with tf.variable_scope("backward", reuse=True):
                mtest_backward = PTBModel(is_training=False, option=option)
        var=tf.trainable_variables()
        var_backward=[x for x in var if x.name.startswith('backward')]
        saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
        init = tf.global_variables_initializer()
        session = tf.Session()
        session.run(init)
        # Restore the pretrained LM weights for both directions.
        saver_forward.restore(session, option.forward_save_path)
        saver_backward.restore(session, option.backward_save_path)
    # Choose the similarity function; BERT-based modes also need an encoder.
    similaritymodel = None
    if sim_mode == 'keyword':
        similarity = similarity_keyword
    elif sim_mode =='keyword-bleu':
        similarity = similarity_keyword_bleu
    elif sim_mode =='keyword-bert':
        similaritymodel = BertEncoding()
        similarity = similarity_keyword_bert
    elif sim_mode =='keyword-bert-bleu':
        similaritymodel = BertEncoding()
        similarity = similarity_keyword_bert_bleu
    elif sim_mode =='semantic':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic
    elif sim_mode =='semantic-bleu':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic_bleu
    elif sim_mode =='semantic-keyword':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic_keyword
    # Embeddings were pickled under Python 2: StrToBytes + latin1 re-decode.
    fileobj = open(option.emb_path,'r')
    emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
    fileobj.close()
    sim=option.sim
    sta_vec=list(np.zeros([option.num_steps-1]))
    use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
    id2sen = dataclass.id2sen
    generateset = []
    # Linearly decreasing schedule: (C/100) * [sample_time, ..., 2].
    C = 0.05
    temperatures = C*(1.0/100)*np.array(list(range(option.sample_time+1,1,-1)))
    print(temperatures)
    for sen_id in range(use_data.length):
        sta_vec=sta_vec_list[sen_id]
        input, sequence_length, _=use_data(1, sen_id)
        input_original=input[0]
        sta_vec_original = [x for x in sta_vec]
        # Additionally mark rare words (rare_since < id < dict_size) as
        # keywords that the similarity term should preserve.
        for i in range(1,option.num_steps):
            if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
                sta_vec[i-1]=1
        pos=0
        print(' '.join(id2sen(input[0])))
        print(sta_vec)
        # Words seen/removed so far; deleted words are re-offered as
        # replacement/insertion candidates later ("calibrated" set).
        calibrated_set = [x for x in input[0]]
        for iter in range(option.sample_time):
            temperature = temperatures[iter]
            print(temperature)
            ind=pos%(sequence_length[0]-1)
            action=choose_action(option.action_prob)
            calibrated_set = list(set(calibrated_set))
            if action==0: # word replacement (action: 0)
                # Score the current sentence: product of forward-LM
                # next-word probabilities.
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                            mode='use')[0]
                else:
                    prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                # Final factor uses the loop-leaked j; dict_size+1 is
                # presumably the end-of-sentence/pad id — TODO confirm.
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
                            option, similaritymodel)[0]
                    prob_old_prob*=similarity_old
                else:
                    similarity_old=-1
                # Propose replacements: rank words by forward*backward LM
                # probability at position ind.
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                        sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
                        calibrated_set=calibrated_set)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                # Score every candidate sentence the same way as the original.
                prob_candidate=[]
                for i in range(len(input_candidate)):
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                # Sample one candidate proportionally to its score.
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                # Length-normalized log scores -> Metropolis acceptance.
                V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length),1e-200))
                V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                alphat = min(1,math.exp(min((V_new-V_old)/temperature,100)))
                if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
                    input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    if np.sum(input1[0])==np.sum(input[0]):
                        pass
                    else:
                        # Remember the replaced word for later re-insertion.
                        calibrated_set.append(input[0][ind])
                        input= input1
                # NOTE(review): similarity_candidate is unbound here when
                # sim is None — this print would raise; confirm sim is
                # always set in that configuration.
                print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
                        V_new,alphat,similarity_old,similarity_candidate[prob_candidate_ind])
                print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            elif action==1: # word insert
                # Cannot grow past the model's fixed num_steps window.
                if sequence_length[0]>=option.num_steps:
                    pos += 1
                    continue
                    # break
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                        sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
                        calibrated_set=calibrated_set)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                #for i in range(option.search_size):
                for i in range(len(input_candidate)):
                    tem=1
                    # Candidates are one word longer than the current input.
                    for j in range(sequence_length_candidate[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                # NOTE(review): unbound if sim is None (printed below).
                similarity_new = similarity_candidate[prob_candidate_ind]
                # Score the current (pre-insertion) sentence for comparison.
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input,\
                            sequence_length,mode='use')[0]
                else:
                    prob_old = output_p(input, forwardmodel) # 100,15,300003
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                # Each side is normalized by its own sentence length.
                V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length_candidate[0]),1e-200))
                V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
                if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
                    input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    sequence_length+=1
                    # Skip past the newly inserted word.
                    pos+=1
                    # sta_vec.insert(ind, 0.0)
                    # del(sta_vec[-1])
                print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
                        V_new,alphat,similarity_old,similarity_new)
                print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            elif action==2: # word delete
                # Keep at least two tokens; never delete at position 0.
                if sequence_length[0]<=2 or ind==0:
                    pos += 1
                    continue
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                            mode='use')[0]
                else:
                    prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                # mode=2 builds the single candidate with word ind removed.
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, None, option.search_size, option,\
                        mode=action,calibrated_set=calibrated_set)
                # delete sentence
                if tfflag:
                    prob_new=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')[0]
                else:
                    prob_new = output_p(input_candidate, forwardmodel)
                tem=1
                for j in range(sequence_length_candidate[0]-1):
                    tem*=prob_new[j][input_candidate[0][j+1]]
                tem*=prob_new[j+1][option.dict_size+1]
                prob_new_prob=tem
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_new_prob=prob_new_prob*similarity_candidate
                #alpha is acceptance ratio of current proposal
                # Only accept if the shortened sentence is among the
                # candidates (locating its index via candidate_ind).
                if input[0] in input_candidate:
                    for candidate_ind in range(len(input_candidate)):
                        if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
                            break
                        pass
                    V_new = math.log(max(np.power(prob_new_prob,1.0/sequence_length_candidate[0]),1e-200))
                    V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
                    alphat = min(1,math.exp((V_new-V_old)/temperature))
                else:
                    alphat=0
                if choose_action([alphat, 1-alphat])==0:
                    # Keep the deleted word as a future candidate, then drop
                    # column ind+1 and pad with dict_size+1 at the end.
                    calibrated_set.append(input[0][ind])
                    input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
                    sequence_length-=1
                    # del(sta_vec[ind])
                    # sta_vec.append(0)
                    pos -= 1
                # NOTE(review): similarity_candidate may be unbound here when
                # sim is None — confirm.
                print('oldprob,vold, vnew, alpha,simold, simnew',prob_old_prob,V_old,\
                        V_new,alphat,similarity_old,similarity_candidate)
                print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            pos += 1
        generateset.append(id2sen(input[0]))
        appendtext(id2sen(input[0]), option.save_path)
    return generateset
def simulatedAnnealing_calibrated(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
    """Simulated-annealing paraphrasing, calibrated variant.

    Same replace/insert/delete edit loop as ``simulatedAnnealing_std`` with
    these differences:
      * temperature schedule has a floor: 0.3 + (2/100) * [sample_time..2];
      * acceptance uses raw (not length-normalized) log sentence scores;
      * insert and delete branches fold the proposal probability
        (``prob_candidate_norm[...]``) into V_old / V_new — this appears to
        be the Metropolis-Hastings reverse-proposal correction (confirm);
      * hitting a length limit in insert/delete ``break``s out of the whole
        sampling loop for this sentence instead of skipping the step.

    Args/returns: see ``simulatedAnnealing_std``.
    """
    tfflag = True
    if tfflag:
        # Build TF-1.x forward/backward LMs; inference uses mtest_*.
        with tf.name_scope("forward_train"):
            with tf.variable_scope("forward", reuse=None):
                m_forward = PTBModel(is_training=True,option=option)
        with tf.name_scope("forward_test"):
            with tf.variable_scope("forward", reuse=True):
                mtest_forward = PTBModel(is_training=False,option=option)
        var=tf.trainable_variables()
        var_forward=[x for x in var if x.name.startswith('forward')]
        saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
        with tf.name_scope("backward_train"):
            with tf.variable_scope("backward", reuse=None):
                m_backward = PTBModel(is_training=True,option=option)
        with tf.name_scope("backward_test"):
            with tf.variable_scope("backward", reuse=True):
                mtest_backward = PTBModel(is_training=False, option=option)
        var=tf.trainable_variables()
        var_backward=[x for x in var if x.name.startswith('backward')]
        saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
        init = tf.global_variables_initializer()
        session = tf.Session()
        session.run(init)
        saver_forward.restore(session, option.forward_save_path)
        saver_backward.restore(session, option.backward_save_path)
    # Select the similarity scoring function.
    similaritymodel = None
    if sim_mode == 'keyword':
        similarity = similarity_keyword
    elif sim_mode =='keyword-bleu':
        similarity = similarity_keyword_bleu
    elif sim_mode =='keyword-bert':
        similaritymodel = BertEncoding()
        similarity = similarity_keyword_bert
    elif sim_mode =='semantic':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic
    elif sim_mode =='semantic-bleu':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic_bleu
    elif sim_mode =='semantic-keyword':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic_keyword
    # Python-2 pickle: StrToBytes wrapper + latin1 decoding.
    fileobj = open(option.emb_path,'r')
    emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
    fileobj.close()
    sim=option.sim
    sta_vec=list(np.zeros([option.num_steps-1]))
    use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
    id2sen = dataclass.id2sen
    generateset = []
    # Decreasing schedule with a 0.3 floor.
    C = 2
    temperatures = 0.3+ C*(1.0/100)*np.array(list(range(option.sample_time+1,1,-1)))
    print(temperatures)
    for sen_id in range(use_data.length):
        # NOTE(review): modulo is taken over len(sta_vec) (= num_steps-1),
        # not len(sta_vec_list); looks suspicious — confirm intent.
        sta_vec=sta_vec_list[sen_id%len(sta_vec)]
        input, sequence_length, _=use_data(1, sen_id)
        input_original=input[0]
        sta_vec_original = [x for x in sta_vec]
        # Mark rare in-vocabulary words as keywords to preserve.
        for i in range(1,option.num_steps):
            if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
                sta_vec[i-1]=1
        pos=0
        print(' '.join(id2sen(input[0])))
        print(sta_vec)
        # Pool of previously seen/deleted words, re-proposed later.
        calibrated_set = [x for x in input[0]]
        for iter in range(option.sample_time):
            temperature = temperatures[iter]
            ind=pos%(sequence_length[0]-1)
            action=choose_action(option.action_prob)
            calibrated_set = list(set(calibrated_set))
            if action==0: # word replacement (action: 0)
                # Score the current sentence with the forward LM.
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                            mode='use')[0]
                else:
                    prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                # Loop-leaked j; dict_size+1 presumably the EOS/pad id.
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
                            option, similaritymodel)[0]
                    prob_old_prob*=similarity_old
                else:
                    similarity_old=-1
                # Rank replacement words by forward*backward LM probability.
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                        sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
                        calibrated_set=calibrated_set)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                # Score all candidate sentences.
                prob_candidate=[]
                for i in range(len(input_candidate)):
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                # Raw (unnormalized-by-length) log scores here.
                V_new = math.log(max(prob_candidate_prob,1e-200))
                V_old = math.log(max(prob_old_prob,1e-200))
                alphat = min(1,math.exp(min((V_new-V_old)/temperature,100)))
                if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
                    input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    if np.sum(input1[0])==np.sum(input[0]):
                        pass
                    else:
                        # Remember the replaced word for later reuse.
                        calibrated_set.append(input[0][ind])
                        input= input1
                print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
                        V_new,alphat,0,0)
                print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            elif action==1: # word insert
                # At the length cap: stop editing this sentence entirely.
                if sequence_length[0]>=option.num_steps:
                    pos += 1
                    break
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                        sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
                        calibrated_set=calibrated_set)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                #for i in range(option.search_size):
                for i in range(len(input_candidate)):
                    tem=1
                    for j in range(sequence_length_candidate[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                # Score the current (pre-insertion) sentence.
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input,\
                            sequence_length,mode='use')[0]
                else:
                    prob_old = output_p(input, forwardmodel) # 100,15,300003
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                # V_old folds in the forward-proposal probability — appears
                # to be the MH reverse-proposal correction; confirm.
                V_new = math.log(max(prob_candidate_prob, 1e-200))
                V_old = math.log(max(prob_old_prob*prob_candidate_norm[prob_candidate_ind],1e-200))
                alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
                if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
                    input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    sequence_length+=1
                    # debug
                    # print('xxxx', sequence_length, sta_vec)
                    # tem=1
                    # prob_old=run_epoch(session, mtest_forward, input,\
                    #         sequence_length,mode='use')[0]
                    # for j in range(sequence_length[0]-1):
                    #     tem*=prob_old[j][input[0][j+1]]
                    #     print(tem,)
                    # tem*=prob_old[j+1][option.dict_size+1]
                    # print(tem)
                    # similarity_old=similarity(input, input_original,sta_vec,\
                    #         id2sen, emb_word, option, similaritymodel)[0]
                    # print(similarity_old)
                    pos+=1
                    # sta_vec.insert(ind, 0.0)
                    # del(sta_vec[-1])
                print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
                        V_new,alphat,0,0)
                print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            elif action==2: # word delete
                # Too short to delete: stop editing this sentence.
                if sequence_length[0]<=2:
                    pos += 1
                    break
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                            mode='use')[0]
                else:
                    prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                # Candidate with word ind deleted.
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, None, option.search_size, option,\
                        mode=action,calibrated_set=calibrated_set)
                # delete sentence
                if tfflag:
                    prob_new=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')[0]
                else:
                    prob_new = output_p(input_candidate, forwardmodel)
                tem=1
                for j in range(sequence_length_candidate[0]-1):
                    tem*=prob_new[j][input_candidate[0][j+1]]
                tem*=prob_new[j+1][option.dict_size+1]
                prob_new_prob=tem
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_new_prob=prob_new_prob*similarity_candidate
                # original sentence
                # Build the REPLACEMENT candidate set (mode=0) to estimate
                # the probability of re-proposing the current sentence —
                # used below as the reverse-proposal term.
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=0)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                        sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=0,\
                        calibrated_set=calibrated_set)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                            sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                # NOTE(review): iterates option.search_size, unlike the
                # len(input_candidate) used elsewhere — may mismatch if
                # fewer candidates are returned; confirm.
                for i in range(option.search_size):
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    # NOTE(review): [0] takes only the FIRST candidate's
                    # similarity and scales all scores by it — looks like a
                    # bug versus the element-wise product used above; confirm.
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)[0]
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                #alpha is acceptance ratio of current proposal
                if input[0] in input_candidate:
                    for candidate_ind in range(len(input_candidate)):
                        if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
                            break
                        pass
                    # Reverse-proposal probability of re-creating the
                    # current sentence enters V_new.
                    V_new = math.log(max(prob_new_prob*prob_candidate_norm[candidate_ind],1e-300))
                    V_old = math.log(max(prob_old_prob,1e-300))
                    alphat = min(1,math.exp((V_new-V_old)/temperature))
                else:
                    alphat=0
                if choose_action([alphat, 1-alphat])==0:
                    # Keep deleted word; drop column ind+1, pad with
                    # dict_size+1 at the end.
                    calibrated_set.append(input[0][ind])
                    input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
                    sequence_length-=1
                    # del(sta_vec[ind])
                    # sta_vec.append(0)
                    pos -= 1
                print('oldprob,vold, vnew, alpha,simold, simnew',prob_old_prob,V_old,\
                        V_new,alphat,0,0)
                print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            pos += 1
        generateset.append(id2sen(input[0]))
        appendtext(id2sen(input[0]), option.save_path)
    return generateset
def simulatedAnnealing_pytorch(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
    """Simulated-annealing paraphrasing scored purely by PyTorch LMs.

    Same replace/insert/delete edit loop as the TF variants, but all
    sentence probabilities come from ``output_p(..., forwardmodel /
    backwardmodel)``.  Differences from the TF variants:
      * temperature = C / log(steps + 2), decaying with the step count;
      * insert/delete candidate scores are length-corrected via
        ``np.power(tem, len_old / len_new)``;
      * insert acceptance additionally requires the candidate score to
        exceed ``prob_old_prob * option.threshold``;
      * after each executed edit branch, ``action`` is set to 3 and a
        reverse Metropolis check runs (see the final block).

    Args:
        option: config object (num_steps, dict_size, sample_time,
            search_size, action_prob, sim, threshold, emb_path, ...).
        dataclass: data wrapper exposing ``sen2id`` and ``id2sen``.
        forwardmodel, backwardmodel: PyTorch language models.
        sim_mode: similarity function selector.

    Returns:
        list of generated sentences (each a list of word strings).
    """
    sim=option.sim
    # Select the similarity scoring function.
    similaritymodel = None
    if sim_mode == 'keyword':
        similarity = similarity_keyword
    elif sim_mode =='semantic':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic
    elif sim_mode =='semantic-bleu':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic_bleu
    elif sim_mode =='semantic-keyword':
        similaritymodel = BertSimilarity()
        similarity = similarity_semantic_keyword
    generated_sentence = []
    # Binary mode here (unlike the TF variants); latin1 for py2 pickles.
    fileemb = open(option.emb_path,'rb')
    emb_word,emb_id=pkl.load(fileemb, encoding = 'latin1')
    sta_vec=list(np.zeros([option.num_steps-1]))
    use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
    id2sen = dataclass.id2sen
    C = 1 # 0.2
    for sen_id in range(use_data.length):
        #generate for each sentence
        # NOTE(review): modulo over len(sta_vec) (= num_steps-1), not
        # len(sta_vec_list) — looks suspicious; confirm intent.
        sta_vec=sta_vec_list[sen_id%len(sta_vec)]
        # Shift the keyword mask right by one slot.
        sta_vec.insert(0, 0.0)
        del(sta_vec[-1])
        input, sequence_length, _=use_data(1, sen_id)
        input_original=input[0]
        # Mark rare in-vocabulary words as keywords to preserve.
        for i in range(1,option.num_steps):
            if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
                sta_vec[i-1]=1
        pos=0
        print('Origin Sentence:')
        print(' '.join(id2sen(input[0])))
        print(sta_vec)
        print('Paraphrase:')
        for iter in range(option.sample_time):
            #ind is the index of the selected word, regardless of the beginning token.
            ind=pos%(sequence_length[0]-1)
            action=choose_action(option.action_prob)
            # Logarithmic cooling: T = C / log(steps + 2).
            steps = float(iter/(sequence_length[0]-1))
            temperature = C/(math.log(steps+2))
            if action==0: # word replacement (action: 0)
                # Score the current sentence with the forward LM.
                prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                # Loop-leaked j; dict_size+1 presumably the EOS/pad id.
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
                            option, similaritymodel)[0]
                    prob_old_prob*=similarity_old
                else:
                    similarity_old=-1
                # Rank replacement words by forward*backward LM probability.
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                prob_backward = output_p(input_backward,backwardmodel)[
                    sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action)
                prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                for i in range(option.search_size):
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                # Sample one candidate proportionally to its score.
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                # NOTE(review): similarity_candidate/similarity_old are
                # unbound if sim is None — confirm sim is always set.
                sim_new = similarity_candidate[prob_candidate_ind]
                sim_old =similarity_old
                V_new = math.log(max(prob_candidate_prob,1e-200))
                V_old = math.log(max(prob_old_prob,1e-200))
                alphat = min(1,math.exp(min((V_new-V_old)/temperature,10)))
                if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
                    input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    if np.sum(input1[0])==np.sum(input[0]):
                        pass
                    else:
                        input= input1
                    print('oldprob,vold, vnew,simold, simnew',prob_old_prob,V_old, V_new,sim_old, sim_new)
                    print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
                # Trigger the reverse check below.
                action = 3
            elif action==1: # word insert
                # Stop editing this sentence at the length cap.
                if sequence_length[0]>=option.num_steps:
                    pos += 1
                    break
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                        cut_from_point(input, sequence_length, ind, option, mode=action)
                prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                prob_backward = output_p(input_backward,backwardmodel)[
                    sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input(input,\
                        sequence_length, ind, prob_mul, option.search_size, option, mode=action)
                prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                for i in range(option.search_size):
                    tem=1
                    for j in range(sequence_length_candidate[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    # Length correction: renormalize to the old length.
                    tem = np.power(tem,(sequence_length[0]*1.0)/(sequence_length_candidate[0]))
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                # Score the current (pre-insertion) sentence.
                prob_old = output_p(input, forwardmodel) # 100,15,300003
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,\
                            option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                #alpha is acceptance ratio of current proposal
                sim_new = similarity_candidate[prob_candidate_ind]
                sim_old =similarity_old
                V_new = math.log(max(prob_candidate_prob, 1e-200))
                V_old = math.log(max(prob_old_prob,1e-200))
                alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
                # Extra guard: candidate must beat old score * threshold.
                if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size and (prob_candidate_prob>prob_old_prob* option.threshold):
                    input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    sequence_length+=1
                    pos+=1
                    # sta_vec.insert(ind, 0.0)
                    #del(sta_vec[-1])
                    print(sta_vec)
                    print('vold, vnew,simold, simnew',V_old, V_new,sim_old, sim_new)
                    print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
                action = 3
            elif action==2: # word delete
                # Stop editing this sentence when it is already minimal.
                if sequence_length[0]<=2:
                    pos += 1
                    break
                prob_old = output_p(input, forwardmodel)
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word, \
                            option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                # Candidate with word ind deleted.
                input_candidate, sequence_length_candidate=generate_candidate_input(input,\
                        sequence_length, ind, None, option.search_size, option, mode=action)
                # delete sentence
                prob_new = output_p(input_candidate, forwardmodel)
                tem=1
                for j in range(sequence_length_candidate[0]-1):
                    tem*=prob_new[j][input_candidate[0][j+1]]
                tem*=prob_new[j+1][option.dict_size+1]
                # Length correction as in the insert branch.
                tem = np.power(tem,sequence_length[0]*1.0/(sequence_length_candidate[0]))
                prob_new_prob=tem
                if sim!=None:
                    similarity_new=similarity(input_candidate, input_original,sta_vec,\
                            id2sen, emb_word, option, similaritymodel)
                    prob_new_prob=prob_new_prob*similarity_new
                # NOTE(review): similarity_new unbound if sim is None.
                sim_new = similarity_new[0]
                sim_old =similarity_old
                V_new = math.log(max(prob_new_prob,1e-300))
                V_old = math.log(max(prob_old_prob,1e-300))
                alphat = min(1,math.exp((V_new-V_old)/temperature))
                if choose_action([alphat, 1-alphat])==0:
                    # Drop column ind+1, pad with dict_size+1 at the end.
                    input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
                    sequence_length-=1
                    pos-=1
                    #del(sta_vec[ind])
                    #sta_vec.append(0)
                    print(sta_vec)
                    print('vold, vnew,simold, simnew',V_old, V_new,sim_old, sim_new)
                    print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
                action = 3
            if action==3: # reverse Metropolis check on the step just taken
                # Re-runs the acceptance test with V_old/V_new swapped.
                # NOTE(review): lastaction is always 0 and nothing is
                # actually reverted — this block only prints 'cancel'.
                lastaction =0
                V_new1 = V_old
                V_old1 = V_new
                alphat = min(1,math.exp((V_new1-V_old1)/temperature))
                if choose_action([alphat, 1-alphat])==0:
                    if lastaction ==0:
                        print('cancel')
            pos += 1
        generated_sentence.append(id2sen(input[0]))
    return generated_sentence
def simulatedAnnealing_tem1(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
    """Rewrite each input sentence by MCMC word edits at a fixed temperature of 1.

    Builds forward/backward PTB language models in TensorFlow and restores
    their checkpoints, then for each sentence runs ``option.sample_time``
    sampling steps.  Each step picks a position and one of three proposals --
    word replacement (action 0), insertion (action 1), deletion (action 2) --
    scores the old vs. proposed sentence by LM probability (optionally
    weighted by keyword similarity to the original sentence), and accepts
    with the Metropolis ratio exp((V_new - V_old) / temperature).

    Args:
        option: configuration object (paths, sizes, action probabilities,
            acceptance threshold, sample_time, search_size, dict_size, ...).
        dataclass: dataset wrapper providing ``sen2id`` and ``id2sen``.
        forwardmodel, backwardmodel: non-TF fallback language models, only
            consulted when ``tfflag`` is False (it is hard-coded True here).
        sim_mode: unused in this variant; similarity is fixed to
            ``similarity_keyword``.

    Returns:
        List with one generated (token-list) sentence per input sentence.
    """
    tfflag = True
    if tfflag:
        # Build train/test graph pairs that share weights via variable reuse.
        with tf.name_scope("forward_train"):
            with tf.variable_scope("forward", reuse=None):
                m_forward = PTBModel(is_training=True,option=option)
        with tf.name_scope("forward_test"):
            with tf.variable_scope("forward", reuse=True):
                mtest_forward = PTBModel(is_training=False,option=option)
        var=tf.trainable_variables()
        var_forward=[x for x in var if x.name.startswith('forward')]
        saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
        with tf.name_scope("backward_train"):
            with tf.variable_scope("backward", reuse=None):
                m_backward = PTBModel(is_training=True,option=option)
        with tf.name_scope("backward_test"):
            with tf.variable_scope("backward", reuse=True):
                mtest_backward = PTBModel(is_training=False, option=option)
        var=tf.trainable_variables()
        var_backward=[x for x in var if x.name.startswith('backward')]
        saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
        init = tf.global_variables_initializer()
        session = tf.Session()
        session.run(init)
        # Restore pretrained forward and backward LM weights.
        saver_forward.restore(session, option.forward_save_path)
        saver_backward.restore(session, option.backward_save_path)
    similaritymodel = BertSimilarity()
    similarity = similarity_keyword #similarity_semantic
    # Word embeddings used by the keyword-similarity scorer.
    fileobj = open(option.emb_path,'r')
    emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
    fileobj.close()
    sim=option.sim
    use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
    id2sen = dataclass.id2sen
    generateset = []
    for sen_id in range(use_data.length):
        #generate for each sentence
        sta_vec=sta_vec_list[sen_id]
        input, sequence_length, _=use_data(1, sen_id)
        input_original=input[0]
        # Mark rare (but in-vocabulary) words as keywords to preserve.
        for i in range(1,option.num_steps):
            if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
                sta_vec[i-1]=1
        pos=0
        print(' '.join(id2sen(input[0])))
        print(sta_vec)
        for iter in range(option.sample_time):
            #ind is the index of the selected word, regardless of the beginning token.
            temperature = 1
            ind=pos%(sequence_length[0]-1)
            action=choose_action(option.action_prob)
            if action==0: # word replacement (action: 0)
                # Probability of the current sentence under the forward LM.
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                        mode='use')[0]
                else:
                    prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                # Factor in the end-of-sentence token after the last word.
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
                        option, similaritymodel)[0]
                    prob_old_prob*=similarity_old
                else:
                    similarity_old=-1
                # Forward and backward context distributions at position ind.
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                    cut_from_point(input, sequence_length, ind, option, mode=action)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                        sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                # Draw search_size candidate replacements from the joint distribution.
                input_candidate, sequence_length_candidate=generate_candidate_input(input,\
                    sequence_length, ind, prob_mul, option.search_size, option, mode=action)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                        sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                for i in range(option.search_size):
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                        id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                # Sample one candidate in proportion to its (similarity-weighted) score.
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                V_new = math.log(max(prob_candidate_prob,1e-200))
                V_old = math.log(max(prob_old_prob,1e-200))
                # Metropolis acceptance; exponent clipped to avoid overflow.
                alphat = min(1,math.exp(min((V_new-V_old)/temperature,100)))
                if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
                    input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    if np.sum(input1[0])==np.sum(input[0]):
                        # Candidate identical to current sentence: nothing to do.
                        pass
                    else:
                        input= input1
                        print('oldprob,vold, vnew,simold, simnew',prob_old_prob,V_old, V_new,0,0)
                        print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            elif action==1: # word insert
                # Cannot insert once the sentence fills the model's window.
                if sequence_length[0]>=option.num_steps:
                    pos += 1
                    break
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                    cut_from_point(input, sequence_length, ind, option, mode=action)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                        sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                # Candidate sentences, each with one word inserted at ind.
                input_candidate, sequence_length_candidate=generate_candidate_input(input,\
                    sequence_length, ind, prob_mul, option.search_size, option, mode=action)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                        sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                for i in range(option.search_size):
                    tem=1
                    for j in range(sequence_length_candidate[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                        id2sen, emb_word, option, similaritymodel)
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
                prob_candidate_prob=prob_candidate[prob_candidate_ind]
                # Score the unmodified sentence for the acceptance ratio.
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input,\
                        sequence_length,mode='use')[0]
                else:
                    prob_old = output_p(input, forwardmodel) # 100,15,300003
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                        id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                V_new = math.log(max(prob_candidate_prob, 1e-200))
                # Reverse-proposal correction: multiply by the candidate's proposal prob.
                V_old = math.log(max(prob_old_prob*prob_candidate_norm[prob_candidate_ind],1e-200))
                alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
                if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size and (prob_candidate_prob>prob_old_prob* option.threshold):
                    input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
                    sequence_length+=1
                    # Skip over the freshly inserted word next iteration.
                    pos+=1
                    sta_vec.insert(ind, 0.0)
                    del(sta_vec[-1])
                    print('vold, vnew,simold, simnew',V_old, V_new,0,0)
                    print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
            elif action==2: # word delete
                # Keep at least two tokens in the sentence.
                if sequence_length[0]<=2:
                    pos += 1
                    break
                if tfflag:
                    prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
                        mode='use')[0]
                else:
                    prob_old= output_p(input, forwardmodel) #15,K
                tem=1
                for j in range(sequence_length[0]-1):
                    tem*=prob_old[j][input[0][j+1]]
                tem*=prob_old[j+1][option.dict_size+1]
                prob_old_prob=tem
                if sim!=None:
                    similarity_old=similarity(input, input_original,sta_vec,\
                        id2sen, emb_word, option, similaritymodel)[0]
                    prob_old_prob=prob_old_prob*similarity_old
                else:
                    similarity_old=-1
                input_candidate, sequence_length_candidate=generate_candidate_input(input,\
                    sequence_length, ind, None, option.search_size, option, mode=action)
                # delete sentence
                if tfflag:
                    prob_new=run_epoch(session, mtest_forward, input_candidate,\
                        sequence_length_candidate,mode='use')[0]
                else:
                    prob_new = output_p(input_candidate, forwardmodel)
                tem=1
                for j in range(sequence_length_candidate[0]-1):
                    tem*=prob_new[j][input_candidate[0][j+1]]
                tem*=prob_new[j+1][option.dict_size+1]
                prob_new_prob=tem
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                        id2sen, emb_word, option, similaritymodel)[0]
                    prob_new_prob=prob_new_prob*similarity_candidate
                # original sentence
                # Reverse move (re-inserting the deleted word) for the MH ratio.
                input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
                    cut_from_point(input, sequence_length, ind, option, mode=0)
                if tfflag:
                    prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
                    prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                else:
                    prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
                    prob_backward = output_p(input_backward,backwardmodel)[
                        sequence_length[0]-1-ind%(sequence_length[0]-1),:]
                prob_mul=(prob_forward*prob_backward)
                input_candidate, sequence_length_candidate=generate_candidate_input(input,\
                    sequence_length, ind, prob_mul, option.search_size, option, mode=0)
                if tfflag:
                    prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
                        sequence_length_candidate,mode='use')
                else:
                    prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
                prob_candidate=[]
                for i in range(option.search_size):
                    tem=1
                    for j in range(sequence_length[0]-1):
                        tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
                    tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
                    prob_candidate.append(tem)
                prob_candidate=np.array(prob_candidate)
                if sim!=None:
                    similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
                        id2sen, emb_word, option, similaritymodel)[0]
                    prob_candidate=prob_candidate*similarity_candidate
                prob_candidate_norm=normalize(prob_candidate)
                #alpha is acceptance ratio of current proposal
                # Locate the current sentence among the reverse-move candidates.
                if input[0] in input_candidate:
                    for candidate_ind in range(len(input_candidate)):
                        if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
                            break
                        pass
                    V_new = math.log(max(prob_new_prob*prob_candidate_norm[candidate_ind],1e-300))
                    V_old = math.log(max(prob_old_prob,1e-300))
                    alphat = min(1,math.exp((V_new-V_old)/temperature))
                else:
                    # Reverse move impossible: reject the deletion outright.
                    alphat=0
                if choose_action([alphat, 1-alphat])==0 and (prob_new_prob>prob_old_prob*option.threshold):
                    # Remove the word at ind and pad with an end-of-sentence id.
                    input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
                    sequence_length-=1
                    del(sta_vec[ind])
                    sta_vec.append(0)
                    # Step back so the next iteration revisits this position.
                    pos -= 1
                    print(' '.join(id2sen(input[0])))
            pos += 1
        generateset.append(id2sen(input[0]))
    return generateset
|
#!/usr/bin/env python
import redis
import re
import settings
import hashlib
r = settings.r
class Timeline:
    """Global timeline: most-recent post ids live in the Redis list 'timeline'."""
    def page(self,page):
        """Return the ``page``-th page (1-based) of Posts, 10 per page.

        Redis LRANGE treats both the start and stop indexes as inclusive,
        so the stop index must be ``page*10 - 1`` to yield exactly 10
        entries (the previous ``page*10`` returned 11).
        """
        pageStart = (page-1)*10
        pageEnd = page*10 - 1
        return [Post(post_id) for post_id in r.lrange('timeline',pageStart,pageEnd)]
class Model(object):
    """Base class that proxies attribute access onto Redis string keys.

    Any attribute other than ``id`` is stored at and read from the key
    ``<classname>:id:<id>:<attrname>``, making instances thin, stateless
    handles around data held in Redis.

    NOTE(review): defining ``__eq__`` without ``__hash__`` makes instances
    unhashable on Python 3 -- confirm whether that is intended.
    """
    def __init__(self,id):
        # Write directly into __dict__ to bypass __setattr__, so 'id'
        # stays local instead of being persisted to Redis.
        self.__dict__['id'] = id
    def __eq__(self,other):
        # Two handles are equal when they refer to the same stored object.
        return self.id == other.id
    def __setattr__(self,name,value):
        if name not in self.__dict__:
            # Unknown attribute: persist it in Redis under this model's key.
            modelName = self.__class__.__name__.lower()
            key = '%s:id:%s:%s' % (modelName,self.id,name.lower())
            r.set(key,value)
        else:
            self.__dict__[name] = value
    def __getattr__(self,name):
        # Invoked only when normal attribute lookup fails; fall back to Redis.
        if name not in self.__dict__:
            modelName = self.__class__.__name__.lower()
            value = r.get('%s:id:%s:%s' % (modelName,self.id,name.lower()))
            if value:
                return value
            raise AttributeError(' cannot find %s ' % name)
        else:
            return object.__getattribute__(self,name)
class User(Model):
    """A user whose data lives under the ``user:id:<id>:*`` Redis keys.

    Fixes relative to the previous revision:
      * list pagination used an inclusive LRANGE stop index of ``page*10``,
        returning 11 items per page instead of 10;
      * ``create`` incremented the uid counter before checking whether the
        username was taken, wasting an id on every duplicate registration;
      * the md5 password digest now encodes its input, as ``hashlib``
        requires bytes on Python 3.
    """
    @staticmethod
    def find_by_username(username):
        """Return the User registered under ``username``, or None."""
        userId = r.get("user:username:%s" % username)
        if userId is not None:
            return User(int(userId))
        else:
            return None
    @staticmethod
    def find_by_id(userId):
        """Return the User with id ``userId``, or None if it does not exist."""
        if r.exists("user:id:%s:username" % userId):
            return User(int(userId))
        else:
            return None
    @staticmethod
    def create(username,password):
        """Register a new user and return it, or None if the name is taken."""
        if not r.get("user:username:%s" % username):
            # Only consume a uid once we know the username is free.
            userId = r.incr("user:uid")
            r.set("user:id:%s:username" % userId,username)
            r.set("user:username:%s" % username,userId)
            salt = settings.SALT
            # hashlib digests operate on bytes, so encode the salted password.
            encryptPassword = hashlib.md5((salt+password).encode('utf-8')).hexdigest()
            r.set("user:id:%s:password" % userId,encryptPassword)
            r.lpush("users",userId)
            return User(userId)
        return None
    def _page(self,key,page):
        """Return one 10-item page (1-based) of Posts from the list ``key``.

        LRANGE bounds are inclusive, hence the ``-1`` on the stop index.
        """
        pageStart,pageEnd = (page-1)*10,page*10-1
        postIds = r.lrange(key,pageStart,pageEnd)
        if postIds:
            return [Post(int(postId)) for postId in postIds]
        return []
    def posts(self,page=1):
        """Posts authored by this user, newest first."""
        return self._page("user:id:%s:posts" % self.id,page)
    def timeline(self,page=1):
        """Posts from this user and everyone they follow, newest first."""
        return self._page("user:id:%s:timeline" % self.id,page)
    def mentions(self,page=1):
        """Posts that @-mention this user, newest first."""
        return self._page("user:id:%s:mentions" % self.id,page)
    def add_post(self,post):
        """Record ``post`` as authored by this user and register its id globally."""
        r.lpush("user:id:%s:posts" % self.id, post.id)
        r.lpush("user:id:%s:timeline" % self.id,post.id)
        r.sadd('posts:id',post.id)
    def add_timeline_post(self,post):
        """Push ``post`` onto this user's home timeline."""
        r.lpush("user:id:%s:timeline" % self.id,post.id)
    def add_mention(self,post):
        """Push ``post`` onto this user's mentions list."""
        r.lpush("user:id:%s:mentions" % self.id,post.id)
    def follow(self,user):
        """Start following ``user``; following oneself is a no-op."""
        if user == self:
            return
        r.sadd("user:id:%s:followees" % self.id,user.id)
        user.add_follower(self)
    def unfollow(self,user):
        """Stop following ``user``."""
        r.srem("user:id:%s:followees" % self.id,user.id)
        user.remove_follower(self)
    def following(self,user):
        """True when this user follows ``user``."""
        return bool(r.sismember("user:id:%s:followees" % self.id,user.id))
    @property
    def followers(self):
        """Users following this user (empty list when none)."""
        followers = r.smembers("user:id:%s:followers" % self.id)
        if followers:
            return [User(int(userId)) for userId in followers]
        return []
    @property
    def followees(self):
        """Users this user follows (empty list when none)."""
        followees = r.smembers("user:id:%s:followees" % self.id)
        if followees:
            return [User(int(userId)) for userId in followees]
        return []
    @property
    def tweet_count(self):
        """Number of posts this user has authored."""
        return r.llen("user:id:%s:posts" % self.id) or 0
    @property
    def followees_count(self):
        """Number of users this user follows."""
        return r.scard("user:id:%s:followees" % self.id) or 0
    @property
    def followers_count(self):
        """Number of users following this user."""
        return r.scard("user:id:%s:followers" % self.id) or 0
    def add_follower(self,user):
        """Record ``user`` as a follower of this user."""
        r.sadd("user:id:%s:followers" % self.id, user.id)
    def remove_follower(self,user):
        """Remove ``user`` from this user's follower set."""
        r.srem("user:id:%s:followers" % self.id, user.id)
class Post(Model):
    """A post (tweet) whose data lives under the ``post:id:<id>:*`` Redis keys."""
    @staticmethod
    def create(user,content):
        """Create a post by ``user``, fan it out, and return the new Post.

        Pushes the post onto the author's lists, the global timeline, every
        follower's timeline, and the mentions list of each @-mentioned user.
        Now returns the created Post (it previously returned None, unlike
        ``User.create`` which returns the created object).
        """
        postId = r.incr("post:uid")
        post = Post(postId)
        post.content = content
        post.user_id = user.id
        user.add_post(post)
        r.lpush("timeline",postId)
        for follower in user.followers:
            follower.add_timeline_post(post)
        # Raw string so the \w escape is passed verbatim to the regex engine.
        mentions = re.findall(r'@\w+',content)
        for mention in mentions:
            u = User.find_by_username(mention[1:])
            if u:
                u.add_mention(post)
        return post
    @staticmethod
    def find_by_id(id):
        """Return the Post with ``id`` if it was ever created, else None."""
        if r.sismember('posts:id',int(id)):
            return Post(id)
        return None
    @property
    def user(self):
        """The User who authored this post."""
        return User.find_by_id(r.get("post:id:%s:user_id" % self.id))
def main():
    """Placeholder entry point; this module is intended to be imported."""
    return None


if __name__ == '__main__':
    main()
|
class SimulationResult:
    """Fitness statistics captured for a single generation of a simulation.

    Fitness values are stored as negated distances, so the
    ``get_distance_*`` accessors simply flip the sign back.
    """

    def __init__(self, lowest_fitness: int, highest_fitness: int, avg_fitness: int, generation: int):
        # Keep the raw per-generation statistics as given.
        self.lowest_fitness: int = lowest_fitness
        self.highest_fitness: int = highest_fitness
        self.avg_fitness: int = avg_fitness
        self.generation: int = generation

    def get_distance_lowest(self) -> int:
        """Distance corresponding to the worst (lowest) fitness."""
        return -self.lowest_fitness

    def get_distance_highest(self) -> int:
        """Distance corresponding to the best (highest) fitness."""
        return -self.highest_fitness

    def get_distance_avg(self) -> int:
        """Distance corresponding to the average fitness."""
        return -self.avg_fitness

    def __str__(self) -> str:
        return f"{self.generation}: [{self.lowest_fitness}, {self.highest_fitness}, {self.avg_fitness}]"
|
from django.conf.urls import url
from views import *
# URL routes for the members app.
# Fix: the update/by_id pattern ended with a doubled '$$' anchor (a typo;
# a single '$' matches end-of-string).
urlpatterns = [
    url(r'^$', home, name = 'home'),
    url(r'add_members$', add_members, name = 'add_members'),
    url(r'show$', show, name = 'show'),
    url(r'show_updated$', show_by_updated, name = 'show_by_updated'),
    url(r'show_stale$', show_stale, name = 'show_stale'),
    url(r'show_months$', show_months, name = 'show_months'),
    url(r'update/(?P<num>\d+)$', update, name = 'update'),
    # NOTE(review): the two comments below look swapped relative to the view
    # names ('existing' vs. 'searched') -- confirm against the views module.
    # updates members searched recently
    url(r'update/existing$', update_existing, name = 'update_existing'),
    # updates saved members
    url(r'update/searched$', update_searched, name = 'update_searched'),
    url(r'update/by_id/(?P<id>\d+)$', update_by_id, name = 'update_by_id'),
    url(r'csv_print$', csv_print, name = 'csv_print'),
    # url(r'(?P<gov_id>\d+)$', member_home, name = 'member_home'),
]
|
import imgpr as ip
import numpy as np
# Seam-carving demo: remove `cut` seams from example.png with the imgpr
# library, biasing the energy map so pixels from row 520 down are cheaper
# to remove, then display the original and carved images side by side.
image = ip.image.openImage("example.png")
height, width = image.shape[:2]
cut = 150  # number of seams to remove
# Additive per-pixel energy offsets; the negative band makes the region
# below row 520 more likely to be carved out.
init_energy = np.zeros(shape=(height, width), dtype=int)
init_energy[520:,:] -= 14
x = ip.placeholder(shape=(height, width))
s = ip.layers.seam(x, iters=cut, init_energy=init_energy, direction=ip.VERTICAL)
# Output shape is (height - cut, width) -- the carved dimension is the row
# axis despite direction=VERTICAL; presumably imgpr names directions by the
# seam's orientation -- confirm against the imgpr docs.
y = ip.layers.sew(x, s, cut, shape=(height - cut, width), direction=ip.VERTICAL)
with ip.Session() as sess:
    output = sess.run(y, feed_dict={x : image})
ip.image.showImages([[image, output]])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.