text stringlengths 8 6.05M |
|---|
# Python language basics 6
# Functions
# Implementing, calling, parameters, return values

x_pos = 0    # player position on a 1-D board
e_x_pos = 4  # enemy position

print(x_pos)


def move():
    """Advance the player one step to the right."""
    global x_pos  # rebind the module-level position
    x_pos += 1


def move_by(amount):
    """Advance the player by `amount` steps (may be negative)."""
    global x_pos
    x_pos += amount


def check_for_collision():
    """Return True when the player occupies the enemy's square, else False."""
    return x_pos == e_x_pos


move()                              # x_pos -> 1
move_by(5)                          # x_pos -> 6
did_collide = check_for_collision()
print(x_pos)
print(did_collide)
|
#!/usr/bin/env python
import rospy
import signal
import sys
import math
import tf
import PyKDL
import copy
from std_msgs.msg import String
from nav_msgs.msg import Path
from sensor_msgs.msg import JointState
import numpy as np
REFERENCE_FRAME='panda_link0'
class Simulator(object):
    """Minimal joint-space simulator for a Panda arm.

    Integrates commanded joint velocities at a fixed rate and republishes
    the resulting joint state on /viz/joint_states for RViz. Grasp/release
    commands snap the two finger joints closed or open.
    """

    # Joint names published with every state message (7 arm + 2 finger joints).
    JOINT_NAMES = ["panda_joint1", "panda_joint2", "panda_joint3",
                   "panda_joint4", "panda_joint5", "panda_joint6",
                   "panda_joint7", "panda_finger_joint1", "panda_finger_joint2"]

    def __init__(self):
        self._tl = tf.TransformListener()
        self._br = tf.TransformBroadcaster()
        rospy.sleep(1.)  # sleep a bit to make sure the TF cache is filled
        # RViz joint-state publisher
        self._viz_pub = rospy.Publisher("/viz/joint_states", JointState, queue_size=5)
        # Simulator state
        self._finger_val = .035  # finger joint value when the gripper is open
        self._current_joint = [0.0, -0.4, 0.0, -2.0, 0.0, 1.6, 0.8,
                               self._finger_val, self._finger_val]
        self._current_velocity = [0.] * 9
        # Last joint positions echoed back from /viz/joint_states. Kept on
        # the instance: the original used a module-level `global
        # joint_position`, which raised NameError when a grasp/release
        # command arrived before any state message had been received.
        self._joint_position = list(self._current_joint)
        self._freq = 200  # integration/publish rate in Hz
        self._vel_sub = rospy.Subscriber("/jacob/joint_vel", JointState, self.on_vel)
        self._action_sub = rospy.Subscriber("/subaction", String, self.on_command, queue_size=1)
        self._state_sub = rospy.Subscriber("/viz/joint_states", JointState, self.update_position, queue_size=1)
        # Publish the initial state for the robot
        self.init_states()
        self._init = False  # NOTE(review): never read in this file — confirm before removing
        self._sim = True    # gates grasp/release handling in on_command

    def init_states(self):
        """Publish the initial joint configuration once."""
        joint = JointState()
        joint.header.stamp = rospy.Time.now()
        joint.name = list(self.JOINT_NAMES)
        joint.position = self._current_joint
        self._viz_pub.publish(joint)

    def on_vel(self, msg):
        """Store the latest commanded joint velocities."""
        for i, v in enumerate(msg.velocity):
            self._current_velocity[i] = v

    def run(self):
        """Integrate and publish joint states until node shutdown."""
        r = rospy.Rate(self._freq)
        while not rospy.is_shutdown():
            self.move()
            r.sleep()

    def move(self):
        """Advance each joint by one velocity step and publish the new state."""
        for i, v in enumerate(self._current_velocity):
            self._current_joint[i] += v / self._freq
        joint = JointState()
        joint.header.stamp = rospy.Time.now()
        joint.name = list(self.JOINT_NAMES)
        joint.position = list(self._current_joint)
        self._viz_pub.publish(joint)

    def update_position(self, msg):
        """Remember the most recently published joint positions."""
        self._joint_position = list(msg.position)

    def on_command(self, msg):
        """Handle 'grasp'/'release' commands by closing/opening the fingers."""
        if not self._sim:
            return
        if msg.data == "grasp":
            self._finger_val = 0.0
        elif msg.data == "release":
            self._finger_val = .035
        else:
            return  # unknown command: ignore
        # Copy (don't alias) the last known state before editing the fingers.
        self._current_joint = list(self._joint_position)
        self._current_joint[-1] = self._finger_val
        self._current_joint[-2] = self._finger_val

    def signal_handler(self, signal, frame):
        """SIGINT handler: terminate the node immediately."""
        sys.exit(0)
if __name__ == "__main__":
    # Start the node, install a clean Ctrl-C handler, and run the sim loop.
    rospy.init_node("simulator")
    simulator = Simulator()
    signal.signal(signal.SIGINT, simulator.signal_handler)
    simulator.run()
|
import numpy as np
import random
import pandas as pd
from matplotlib import pyplot as plt
from shapely.geometry import LineString, MultiPolygon, MultiPoint, box, Polygon,Point
from shapely.ops import split, unary_union
from mpl_toolkits import mplot3d
from scipy.optimize import curve_fit
import seaborn as sns
sns.set()
def get_one_square_box(side: int):
    """Create a square axis-aligned box in the second quadrant.

    :param side: length of a side; x spans [-side, 0], y spans [0, side]
    :return: (box polygon, side)
    """
    square = box(-side, 0.0, 0.0, side)
    return square, side
def modify_random_shape_of_box(b, s):
    """Add testing shape on a box to get a polygon

    :param b: box on second quadrant
    :param s: length of each side of the box
    :return: polygon on second quadrant
    """
    # Seed vertices for the four deformed edges. 'right'/'down' later become
    # cut lines; 'left'/'up' become polygons unioned back on. Left repeats
    # right shifted by -s and up repeats down shifted by +s, so opposite
    # sides of the pad stay complementary one pitch apart.
    random_tuples_right = np.array([[0, 0], [0, 1]])
    random_tuples_left = np.array([-s, 1])
    random_tuples_up = np.array([0, s])
    random_tuples_down = np.array([[0, 0], [-1, 0]])
    for i in range(s):  # random shape generated
        if i == 0 or i == 1:
            continue  # first two vertices stay fixed
        # random x-offset in [-(i-1), -1] for the vertex at height i
        random_num = np.random.randint(-i+1, 0)
        random_tuples_right = np.vstack([random_tuples_right, np.array([random_num, i])])
        random_tuples_down = np.vstack([random_tuples_down, np.array([-i, -random_num])])
        # same offsets, translated by one pad pitch (s)
        random_tuples_left = np.vstack([random_tuples_left, np.array([random_num - s, i])])
        random_tuples_up = np.vstack([random_tuples_up, np.array([-i, -random_num + s])])
    # right down is for subtraction from the box, up left for union
    line_right = LineString(np.vstack([random_tuples_right, np.array([0, s])]).tolist())
    line_down = LineString(np.vstack([random_tuples_down, np.array([-s, 0])]).tolist())
    poly_left = Polygon(np.vstack([random_tuples_left, np.array([-s, s])]).tolist())
    poly_up = Polygon(np.vstack([random_tuples_up, np.array([-s, s])]).tolist())
    # cut/combine
    box_after_r = split(b, line_right)  # r is right
    box_after_r_d = split(box_after_r[0], line_down)  # d is down, the first item is desired polygon
    polygons = MultiPolygon([box_after_r_d[0], poly_up])  # intermediate between box_final and poly_left, up
    box_after_up = unary_union(polygons)
    polygons = MultiPolygon([box_after_up, poly_left])
    box_final = unary_union(polygons)
    return box_final
def _carve_and_merge(b, line_right, line_down, poly_left, poly_up):
    """Cut the box along the right/down lines, then union the up/left bumps.

    Right and down carve material off the box; up and left add the matching
    bumps so opposite sides of the pad stay complementary.
    """
    box_after_r = split(b, line_right)  # r is right
    box_after_r_d = split(box_after_r[0], line_down)  # first item is the desired polygon
    box_after_up = unary_union(MultiPolygon([box_after_r_d[0], poly_up]))
    return unary_union(MultiPolygon([box_after_up, poly_left]))


def modify_specific_shape_of_box(b, s, string, amp):
    """Deform the box sides with a named profile.

    :param b: box on second quadrant
    :param s: length of each side of the box
    :param string: profile name, 'sin' or 'box'
    :param amp: amplitude of the profile
    :return: polygon on second quadrant
    :raises ValueError: for an unknown profile name (the original silently
        returned None, which crashed callers later)
    """
    # The profile occupies the middle half of each side.
    start = 0.25
    end = 0.75
    if string == 'sin':
        step = 0.01  # sampling step along the sine wave
        x_sin_range_temp = np.arange(s * start, end * s, step)
        # close the wave exactly at end*s with amplitude 0
        x_sin_range = np.append(x_sin_range_temp, np.array(end * s))
        y_range = amp * np.append(np.sin((x_sin_range_temp - start * s) * np.pi / (start * s)), np.array(0))
        sin_right = np.vstack([np.array([0, 0]),
                               np.vstack([np.array(list(zip(-1 * y_range, x_sin_range))),
                                          np.array([0, s])])])
        sin_left = np.array(list(zip(-1 * y_range - s, x_sin_range)))
        sin_down = np.vstack([np.array([0, 0]),
                              np.vstack([np.array(list(zip(-1 * x_sin_range, y_range))),
                                         np.array([-s, 0])])])
        sin_up = np.array(list(zip(-1 * x_sin_range, y_range + s)))
        return _carve_and_merge(b,
                                LineString(sin_right.tolist()),
                                LineString(sin_down.tolist()),
                                Polygon(sin_left.tolist()),
                                Polygon(sin_up.tolist()))
    elif string == 'box':
        # square bump: up at start, across, down at end
        list_turning_point_x = np.array([start, start, end, end]) * s
        list_turning_point_y = np.array([0, amp, amp, 0])
        box_right = list(zip(-list_turning_point_y, list_turning_point_x))
        box_left = list(zip(-list_turning_point_y - s, list_turning_point_x))
        box_down = list(zip(-list_turning_point_x, list_turning_point_y))
        box_up = list(zip(-list_turning_point_x, list_turning_point_y + s))
        return _carve_and_merge(b,
                                LineString(box_right),
                                LineString(box_down),
                                Polygon(box_left),
                                Polygon(box_up))
    raise ValueError("unknown profile name: %r (expected 'sin' or 'box')" % (string,))
def get_pad_array(b, s):
    """Return the pad plus a copy translated one pitch to the right.

    :param b: a modified box on the second quadrant
    :param s: length of the side of the box
    :return: [b, b shifted by (+s, 0)]
    """
    shifted_coords = np.array(list(b.exterior.coords)) + np.array([s, 0])
    return [b, Polygon(shifted_coords.tolist())]
def get_pad_nine(b, s):
    """Tile the pad into its 3x3 neighbourhood by translation.

    :param b: a modified box on the second quadrant
    :param s: length of the side of the box (translation pitch)
    :return: [center, left, upleft, upcenter, upright, right,
              lowright, lowcenter, lowleft] — same order as before
    """
    coords = np.array(list(b.exterior.coords))
    # translation offsets, in the exact order callers index into
    offsets = [(-s, 0),   # left
               (-s, s),   # up-left
               (0, s),    # up-center
               (s, s),    # up-right
               (s, 0),    # right
               (s, -s),   # low-right
               (0, -s),   # low-center
               (-s, -s)]  # low-left
    return [b] + [Polygon((coords + np.array(off)).tolist()) for off in offsets]
def simulate_amplitude(l, s, n, r):
    """Plot average hit counts on two pads while a laser scans along x.

    :param l: a list of adjacent pads/polygons (l[0] center, l[1] right)
    :param s: length of side of one box
    :param n: the number of random points per trial
    :param r: side of the square the points are sampled from
               (docstring said "standard d" but sampling is uniform — see below)
    :return: None; plots hit fractions vs laser position
    """
    path = 2*s+1  # number of scan positions along x, from -s to s
    list_a = np.array([])  # NOTE(review): unused
    charges_sum_right = np.array([])  # NOTE(review): unused
    charges_sum_center = np.array([])  # NOTE(review): unused
    center_of_pads = np.array([-s / 2, s / 2, s / 2, s / 2])  # NOTE(review): unused
    y_axis = np.array([])
    y2_axis = np.array([])
    def is_contain(point_list, b):
        # boolean array: which sampled points fall inside polygon b
        zeros = np.array([])
        for j in range(len(point_list)):
            zeros = np.append(zeros, b.contains(Point(point_list[j])))
        return zeros
    for i in range(path):
        coord = [i - s, int(s/2)]  # laser at x = i - s, fixed y = s//2
        charges_center = np.array([])
        charges_right = np.array([])
        for j in range(20):  # 20 Monte-Carlo trials per laser position
            # uniform square of side r centered on the laser (not Gaussian)
            random_points_x = np.random.uniform(coord[0] - r/2, coord[0] + r/2, n)
            #noise_x = np.random.uniform(coord[0]-noi/2, coord[0]+noi/2, (n, 1))
            #random_points_x = np.add(noise_x, random_points_x)
            random_points_y = np.random.uniform(coord[1] - r/2, coord[1] + r/2, n)
            #noise_y = np.random.uniform(coord[1]-noi/2, coord[1]+noi/2, (n, 1))
            #random_points_y = np.add(noise_y, random_points_y)
            random_points = list(zip(random_points_x, random_points_y))
            charges_center = np.append(charges_center, np.count_nonzero(is_contain(random_points, l[0])))
            charges_right = np.append(charges_right, np.count_nonzero(is_contain(random_points, l[1])))
        # average hit count per trial on each pad
        charge_sum_center = np.sum(charges_center) / 20 #/ (np.sum(charges_right) + np.sum(charges_center))
        charge_sum_right = np.sum(charges_right) / 20 #/ (np.sum(charges_right) + np.sum(charges_center))
        charge_sum_center = charge_sum_center #(charge_sum_center + charge_sum_right)
        charge_sum_right = charge_sum_right #(charge_sum_center + charge_sum_right)
        #print(i)
        #print(charge_sum_right)
        #print(charges_sum_center)
        # NOTE(review): np.append(value, y_axis) PREPENDS, so y_axis ends up
        # in reverse scan order while x_plot below is ascending — the plot is
        # mirrored in x. Confirm whether this is intended.
        y_axis = np.append(charge_sum_center, y_axis)
        y2_axis = np.append(charge_sum_right, y2_axis)
    x_plot = np.arange(-s, s+1)
    plt.plot(x_plot, y_axis, 'ro', x_plot, y2_axis, 'blue')
def simulate_amplitude2(l, s, n, r):
    """Plot the charge-weighted position error along a horizontal laser scan.

    :param l: a list of adjacent pads/polygons (l[0] center, l[1] right)
    :param s: length of side of one box
    :param n: the number of random points per trial
    :param r: side of the square the points are sampled from
    :return: None; plots |reconstructed x - true x| vs laser position
    """
    trials = 20

    def _hits(points, pad):
        # count how many of the sampled points fall inside the pad
        return sum(1 for pt in points if pad.contains(Point(pt)))

    errors = []
    for step in range(2 * s + 1):
        laser_x = step - s          # scan x from -s to s
        laser_y = int(s / 2)        # fixed scan height
        center_total = 0
        right_total = 0
        for _ in range(trials):
            # uniform square of side r centered on the laser position
            xs = np.random.uniform(laser_x - r / 2, laser_x + r / 2, n)
            ys = np.random.uniform(laser_y - r / 2, laser_y + r / 2, n)
            points = list(zip(xs, ys))
            center_total += _hits(points, l[0])
            right_total += _hits(points, l[1])
        avg_center = center_total / trials
        avg_right = right_total / trials
        # charge-weighted x estimate (pads at +/-6) minus the true laser x
        errors.append(abs((avg_right * 6 + avg_center * (-6))
                          / (avg_center + avg_right) - laser_x))
    x_plot = np.arange(-s, s + 1)
    plt.plot(x_plot, np.array(errors), 'ro')
def simulate_amplitude3(l, s, n, r, laser):
    """Monte-Carlo position resolution over a 3x3 pad neighbourhood.

    Fires `laser` randomly placed lasers; for each, samples k*n uniform
    points, counts hits per pad, reconstructs a charge-weighted (x, y) and
    records the distance to the true laser position.

    :param l: list of 9 polygons as returned by get_pad_nine
              (0 center, 1 left, 2 upleft, 3 upcenter, 4 upright,
               5 right, 6 lowright, 7 lowcenter, 8 lowleft)
    :param s: length of side of one box
    :param n: the number of random points per trial
    :param r: side of the square the points are sampled from
    :param laser: number of random laser positions
    :return: (x coords, y coords, per-laser position error), aligned index-wise
    """
    k = 20          # trials per laser position
    divi = k * n    # normalization for average hit fraction
    # pad name -> index in l (order fixed by get_pad_nine)
    pads = {'center': 0, 'left': 1, 'upleft': 2, 'upcenter': 3,
            'upright': 4, 'right': 5, 'lowright': 6, 'lowcenter': 7,
            'lowleft': 8}

    def _hits(points, pad):
        # number of sampled points inside the pad
        return sum(1 for pt in points if pad.contains(Point(pt)))

    random_coord_x = np.random.uniform(-2 * s - s / 4, s + s / 4, laser)
    random_coord_y = np.random.uniform(-s - s / 4, 2 * s + s / 4, laser)
    errors = []
    for z, coord in enumerate(zip(random_coord_x, random_coord_y)):
        print(z)  # progress indicator
        counts = dict.fromkeys(pads, 0)
        for _ in range(k):
            xs = np.random.uniform(coord[0] - r / 2, coord[0] + r / 2, n)
            ys = np.random.uniform(coord[1] - r / 2, coord[1] + r / 2, n)
            points = list(zip(xs, ys))
            # BUGFIX: the original accumulated the 'lowleft' counts into the
            # 'lowcenter' array (copy-paste error), corrupting both sums.
            for name, idx in pads.items():
                counts[name] += _hits(points, l[idx])
        q = {name: counts[name] / divi for name in pads}
        total = sum(q.values())
        if total == 0:
            # no pad saw any charge: resolution undefined at this position
            errors.append(np.nan)
            continue
        left_a_x = q['upleft'] + q['left'] + q['lowleft']
        center_a_x = q['upcenter'] + q['center'] + q['lowcenter']
        right_a_x = q['upright'] + q['right'] + q['lowright']
        up_a_y = q['upleft'] + q['upcenter'] + q['upright']
        center_a_y = q['left'] + q['center'] + q['right']
        low_a_y = q['lowleft'] + q['lowcenter'] + q['lowright']
        r_cons_x = (left_a_x * (-1.5 * s / 2) + center_a_x * (-s / 2) + right_a_x * (s / 2)) / total
        r_cons_y = (up_a_y * (1.5 * s) + center_a_y * (s / 2) + low_a_y * (-s / 2)) / total
        # BUGFIX: append (the original prepended via np.append(delta, y_axis)),
        # so the errors stay aligned with random_coord_x/random_coord_y.
        errors.append(((r_cons_x - coord[0]) ** 2 + (r_cons_y - coord[1]) ** 2) ** 0.5)
    return random_coord_x, random_coord_y, np.array(errors)
def simulate_amplitude4(l, s, n, r, laser):
    """Monte-Carlo position resolution over a 3x3 pad neighbourhood.

    Near-duplicate of simulate_amplitude3 (kept for API compatibility);
    unused locals of the original (path, list_a, ...) were dropped.

    :param l: list of 9 polygons as returned by get_pad_nine
              (0 center, 1 left, 2 upleft, 3 upcenter, 4 upright,
               5 right, 6 lowright, 7 lowcenter, 8 lowleft)
    :param s: length of side of one box
    :param n: the number of random points per trial
    :param r: side of the square the points are sampled from
    :param laser: number of random laser positions
    :return: (x coords, y coords, per-laser position error), aligned index-wise
    """
    k = 20          # trials per laser position
    divi = k * n    # normalization for average hit fraction
    # pad name -> index in l (order fixed by get_pad_nine)
    pads = {'center': 0, 'left': 1, 'upleft': 2, 'upcenter': 3,
            'upright': 4, 'right': 5, 'lowright': 6, 'lowcenter': 7,
            'lowleft': 8}

    def _hits(points, pad):
        # number of sampled points inside the pad
        return sum(1 for pt in points if pad.contains(Point(pt)))

    random_coord_x = np.random.uniform(-2 * s - s / 4, s + s / 4, laser)
    random_coord_y = np.random.uniform(-s - s / 4, 2 * s + s / 4, laser)
    errors = []
    for z, coord in enumerate(zip(random_coord_x, random_coord_y)):
        print(z)  # progress indicator
        counts = dict.fromkeys(pads, 0)
        for _ in range(k):
            xs = np.random.uniform(coord[0] - r / 2, coord[0] + r / 2, n)
            ys = np.random.uniform(coord[1] - r / 2, coord[1] + r / 2, n)
            points = list(zip(xs, ys))
            # BUGFIX: the original accumulated the 'lowleft' counts into the
            # 'lowcenter' array (copy-paste error), corrupting both sums.
            for name, idx in pads.items():
                counts[name] += _hits(points, l[idx])
        q = {name: counts[name] / divi for name in pads}
        total = sum(q.values())
        if total == 0:
            # no pad saw any charge: resolution undefined at this position
            errors.append(np.nan)
            continue
        left_a_x = q['upleft'] + q['left'] + q['lowleft']
        center_a_x = q['upcenter'] + q['center'] + q['lowcenter']
        right_a_x = q['upright'] + q['right'] + q['lowright']
        up_a_y = q['upleft'] + q['upcenter'] + q['upright']
        center_a_y = q['left'] + q['center'] + q['right']
        low_a_y = q['lowleft'] + q['lowcenter'] + q['lowright']
        r_cons_x = (left_a_x * (-1.5 * s / 2) + center_a_x * (-s / 2) + right_a_x * (s / 2)) / total
        r_cons_y = (up_a_y * (1.5 * s) + center_a_y * (s / 2) + low_a_y * (-s / 2)) / total
        # BUGFIX: append (the original prepended via np.append(delta, y_axis)),
        # so the errors stay aligned with random_coord_x/random_coord_y.
        errors.append(((r_cons_x - coord[0]) ** 2 + (r_cons_y - coord[1]) ** 2) ** 0.5)
    return random_coord_x, random_coord_y, np.array(errors)
# """
# # test closed polygon
# p1 = [(0, 0), (1, 1), (1, 0)]
# poly1 = Polygon(p1)
# print('This is poly1, area, expected 0.5:', poly1.area)
# p2 = [(0, 0), (1, 1), (2, 2), (2, 1), (2, 0), (1, 0)]
# poly2 = Polygon(p2)
# print('This is poly2, area, expected 2:', poly2.area)
# p3 = [(0, 0), (0, 1), (1, 1), (1, 0)]
# poly3 = Polygon(p3)
# print('This is poly3, area, expected 1:', poly3.area)
# p4 = [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0), (1, 0)]
# poly4 = Polygon(p4)
# print('This is poly4, area, expected 4:', poly4.area)
# p5 = [(0, 0), (0, -1), (0, -2), (-1, -2), (-2, -2), (-2, -1), (-2, 0)]
# poly5 = Polygon(p5)
# print('This is poly5, area, expected 4:', poly5.area)
# """
# """
# # test box function
# box, length = get_one_square_box(12)
# x, y = box.exterior.xy
# plt.plot(x, y)
# """
# """
# # test modify_random function
# box1, length1 = get_one_square_box(10)
# b_after1 = modify_random_shape_of_box(box1, length)
# x1, y1 = b_after1.exterior.xy
# plt.plot(x1, y1)
# """
# test get_pad_array: build a random pad pair and draw both outlines
box2, length2 = get_one_square_box(12)
b_after2 = modify_random_shape_of_box(box2, length2)
array2 = get_pad_array(b_after2, length2)
center_x, center_y = array2[0].exterior.xy
right_x, right_y = array2[1].exterior.xy
plt.plot(center_x, center_y, 'r', right_x, right_y, 'g')
# """
# # test simulate_amplitude with random shape
# box3, length3 = get_one_square_box(12)
# b_after3 = modify_random_shape_of_box(box3, length3)
# array3 = get_pad_array(b_after3, length3)
# poly3a = array3[0]
# poly3b = array3[1]
# x3a, y3a = poly3a.exterior.xy
# x3b, y3b = poly3b.exterior.xy
# plt.subplot(1, 2, 1)
# plt.plot(x3a, y3a, 'r', x3b, y3b, 'g')
# plt.title('pad shape with coord in mm')
# plt.subplot(1, 2, 2)
# #simulate_amplitude(array3, length3, 100, length3/2)
# simulate_amplitude2(array3, length3, 1000, length3/2)
# plt.title('f vs laser position, \n 1000 points per position \n normal distribution sd=%s' % length3)
# plt.ylabel('f = A1/(A1+A2)')
# plt.xlabel('laser position in mm')
# plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.5, hspace=0.2)
# plt.show()
# simulate_amplitude(array3, length3, 1000, length3/2)
# plt.title('f vs laser position, \n 1000 points per position \n normal distribution sd=%s' % length3)
# plt.ylabel('f = A1/(A1+A2)')
# plt.xlabel('laser position in mm')
# plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.5, hspace=0.2)
# """
# test simulate_amplitude with regular box
box5, length5 = get_one_square_box(8)                              # 8 mm pad
box5 = modify_specific_shape_of_box(box5, length5, 'sin', 2)       # sine-wave edges, amplitude 2
array5 = get_pad_array(box5, length5)    # pad pair (used by the commented experiments below)
array5b = get_pad_nine(box5, length5)    # 3x3 pad neighbourhood
# poly5a = array5[0]
# poly5b = array5[1]
# x5a, y5a = poly5a.exterior.xy
# x5b, y5b = poly5b.exterior.xy
# plt.subplot(1, 2, 1)
# plt.plot(x5a, y5a, 'r', x5b, y5b, 'g')
# plt.title('pad shape with coord in mm')
# plt.subplot(1, 2, 2)
# n = 1
# #simulate_amplitude(array5, length5, 100, length5/2)
# simulate_amplitude2(array5, length5, 100, n)
# plt.title('side length = 12, interval = %s' %n)
# plt.ylabel('f = A1*(-5)+A2*5/(A1+A2)')
# plt.xlabel('laser position in mm')
# #plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.5, hspace=0.2)
# plt.show()
#
# simulate_amplitude(array5, length5, 100, n)
# plt.title('side length = 12, interval = %s' %n)
# plt.ylabel('number of charges in each pad')
# plt.xlabel('laser position in mm')
# #plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.5, hspace=0.2)
# plt.show()
n=0.2  # sampling-square side used by the (commented) resolution scan below
# Unpack the nine pads; index order fixed by get_pad_nine.
poly5a = array5b[0]  # center
poly5b = array5b[1]  # left
poly5c = array5b[2]  # up-left
poly5d = array5b[3]  # up-center
poly5e = array5b[4]  # up-right
poly5f = array5b[5]  # right
poly5g = array5b[6]  # low-right
poly5h = array5b[7]  # low-center
poly5i = array5b[8]  # low-left
x5a, y5a = poly5a.exterior.xy
x5b, y5b = poly5b.exterior.xy
x5c, y5c = poly5c.exterior.xy
x5d, y5d = poly5d.exterior.xy
x5e, y5e = poly5e.exterior.xy
x5f, y5f = poly5f.exterior.xy
x5g, y5g = poly5g.exterior.xy
x5h, y5h = poly5h.exterior.xy
x5i, y5i = poly5i.exterior.xy
# x, y, z = simulate_amplitude3(array5b, length5, 500, n, 100)
# formatted_x = [round(elem) for elem in x ]
# formatted_y = [round(elem) for elem in y ]
# row_data = {'x': formatted_x, 'y': formatted_y, 'Amp': z, 'row_x': x, 'row_y': y}
# df = pd.DataFrame(row_data, columns = ['x','y','Amp','row_x','row_y'])
# index_drop = df[df['Amp'].isnull()].index
# df.drop(index_drop, inplace=True)
# df.to_csv(r'/Users/fjwu/Desktop/git\repo/SecondTry5.csv', index = None, header=True)
fig = plt.figure(figsize=(6,4))
# ax = fig.add_subplot(212)
#
# data = pd.read_csv("SecondTry5.csv")
#
# data_pivoted = data.pivot_table(index='y', columns='x', values='Amp')
# sns.heatmap(data_pivoted, cmap='Greens')
# ax.invert_yaxis()
# plt.title("Resolution in mm")
# plt.xlabel("coord_x in mm")
# plt.ylabel("coord_y in mm")
#
# X = data['row_x'].to_numpy()
# Y = data['row_y'].to_numpy()
# Z = data['Amp'].to_numpy()
#
# col = np.arange(len(Z))
# ax = plt.axes(projection='3d')
# X, Y = xx, yy = np.meshgrid(X, Y)
# ax.plot_surface(X, Y, Z, cmap='binary')
# # ax.scatter(X, Y, Z, c=col, depthshade=True)
# # ax.set_xlabel('x')
# # ax.set_ylabel('y')
# # ax.set_zlabel('z');
# # ax.scatter(Y,Z,c='b', alpha=0.5)
#
#Add second axes object
ax = fig.add_subplot(211)  # top panel of the figure created above
# Draw all nine pad outlines in green.
plt.plot(x5a, y5a, 'g', x5b, y5b, 'g', x5c, y5c, 'g',
         x5d, y5d, 'g', x5e, y5e, 'g', x5f, y5f, 'g',
         x5g, y5g, 'g', x5h, y5h, 'g', x5i, y5i, 'g')
plt.title('pad shape with coord in mm')
# # # Make sure the elements of the plot are arranged properly
# # plt.tight_layout()
# plt.show()
# n = 7
# #simulate_amplitude(array5, length5, 100, length5/2)
# simulate_amplitude2(array5, length5, 100, n)
# plt.title('side length = 12, interval = %s' %n)
# plt.ylabel('f = A1*(-5)+A2*5/(A1+A2)')
# plt.xlabel('laser position in mm')
# #plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.5, hspace=0.2)
# plt.show()
# simulate_amplitude(array5, length5, 100, n)
# plt.title('side length = 12, interval = %s' %n)
# plt.ylabel('number of charges in each pad')
# plt.xlabel('laser position in mm')
# #plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.5, hspace=0.2)
plt.show()
|
/Users/daniel/anaconda/lib/python3.6/sre_compile.py |
# Generated by Django 3.1.1 on 2020-09-27 13:04
from django.db import migrations
class Migration(migrations.Migration):
    """Clear the custom db_table for the parliament1 model.

    AlterModelTable with table=None lets Django fall back to its default
    table name (app label + model name) — presumably reverting an earlier
    explicit db_table; confirm against migration 0007.
    """

    dependencies = [
        ('scrapingApp', '0007_auto_20200927_1602'),
    ]

    operations = [
        migrations.AlterModelTable(
            name='parliament1',
            table=None,
        ),
    ]
|
class Ray:
    """A ray defined by an origin and a direction, with a scalar parameter t."""

    def __init__(self, origin, direction):
        self.origin = origin
        self.direction = direction
        self.t = 0  # scalar parameter along the ray; starts at zero

    def xform(self, mat):
        """Transform in place: origin as a point, direction as a vector."""
        self.origin, self.direction = (
            mat.xformPoint(self.origin),
            mat.xformVec(self.direction),
        )

    def getOrigin(self):
        """Return the ray's origin."""
        return self.origin

    def getDirection(self):
        """Return the ray's direction."""
        return self.direction
|
# !/usr/bin/python2
# -*- coding: UTF-8 -*-
import os
import re
import xlwt
# Matches: 21-char timestamp prefix, "ACTION <name> [finished] ... =<seconds>sec".
# Compiled once instead of once per line.
_ACTION_RE = re.compile(
    r'^[0-9\/\s\:\,]{21}ACTION\s(\w{1,50})\s\[finished\][\w\s\=]{1,50}\=([0-9\.]{1,10})sec')


def get_data(file_name):
    """Extract finished-action names and their durations from a log file.

    :param file_name: path of the log file to parse
    :return: (action names, duration strings), parallel lists
    """
    action = []
    action_time = []
    # iterate lazily instead of readlines(); don't shadow the file_name param
    with open(file_name, 'r', encoding='utf-8') as log_file:
        for line in log_file:
            info = _ACTION_RE.match(line)
            if info:
                action.append(info.group(1))
                action_time.append(info.group(2))
    print(action, action_time)
    return action, action_time
def cal_time(action, action_time):
    """Aggregate per-action run counts and average durations.

    One pass over the data (the original re-scanned the whole action list
    once per unique action name).

    :param action: list of action names, one per run
    :param action_time: list of duration strings (seconds), parallel to action
    :return: header row plus [run_num, action_name, avg_time] rows,
             sorted by run count (then name, then average)
    """
    totals = {}  # action name -> [run count, summed seconds]
    for name, seconds in zip(action, action_time):
        entry = totals.setdefault(name, [0, 0.0])
        entry[0] += 1
        entry[1] += float(seconds)
    action_data = [['run_num', 'action_name', 'avg_time']]
    for name, (run_num, total) in totals.items():
        # keep the original two-decimal formatting semantics
        avg_time = float('%.2f' % (total / run_num))
        action_data.append([run_num, name, avg_time])
    return action_data[:1] + sorted(action_data[1:])
def write_to_excel(sheet_name, action_info, excel_name):
    """Write rows to an .xls sheet with simple conditional styling.

    Header row is red; float cells above 2 (slow averages) are blue;
    everything else black. All bold, two-decimal number format.

    :param sheet_name: name of the worksheet to create
    :param action_info: list of rows (first row is the header)
    :param excel_name: output .xls path
    """
    excel = xlwt.Workbook(encoding="ascii")
    table = excel.add_sheet(sheet_name)
    # Build each style once instead of once per cell.
    header_style = xlwt.easyxf('font:name Times New Roman,color-index red,bold on', num_format_str='#,##0.00')
    slow_style = xlwt.easyxf('font:name Times New Roman,color-index blue,bold on', num_format_str='#,##0.00')
    normal_style = xlwt.easyxf('font:name Times New Roman,color-index black,bold on', num_format_str='#,##0.00')
    for row, item in enumerate(action_info):
        for column, value in enumerate(item):
            if row == 0:
                style = header_style
            elif isinstance(value, float) and value > 2:
                style = slow_style  # highlight averages above 2 seconds
            else:
                style = normal_style
            table.write(row, column, value, style)
    excel.save(excel_name)
if __name__ == '__main__':
    # Parse the action log next to the working directory and export the
    # per-action averages to an .xls file.
    path = os.getcwd()
    # os.path.join replaces the original raw-backslash concatenation,
    # whose excel path even embedded a stray space ('\ action_log.xls').
    file_name = os.path.join(path, 'case_log.txt')
    sheet_name = 'action_avg_time'
    excel_name = os.path.join(path, 'action_log.xls')
    action, action_time = get_data(file_name)
    action_info = cal_time(action, action_time)
    write_to_excel(sheet_name, action_info, excel_name)
|
from PySide2.QtCore import QTimer
class Timer:
    """Thin QTimer wrapper whose interval can be given as a rate.

    `wpm` is treated as events-per-minute (presumably words per minute,
    judging by the name) and converted to a millisecond interval.
    """

    @staticmethod
    def __convert_2_local_time(wpm):
        # events-per-minute -> milliseconds between events
        return 60 * 1000 / wpm

    def __init__(self):
        self.__timer = QTimer()

    def start(self, time, function, make_conversion=True):
        """Start a fresh timer invoking `function` on each timeout.

        `time` is a per-minute rate unless make_conversion is False,
        in which case it is taken as milliseconds directly.
        """
        self.__timer = QTimer()  # drop any previously connected timer
        interval = self.__convert_2_local_time(time) if make_conversion else time
        self.__timer.timeout.connect(function)
        self.__timer.start(interval)

    def stop(self):
        """Stop the underlying timer."""
        self.__timer.stop()

    def delete(self):
        """Discard the underlying timer entirely."""
        self.__timer = None

    def is_active(self):
        """Truthy iff a timer exists and is currently running."""
        timer = self.__timer
        return timer and timer.isActive()

    def is_not_deleted(self):
        """True while the underlying timer has not been delete()d."""
        return self.__timer is not None
|
# Read n and print 1^2 + 2^2 + ... + n^2.
n = int(input())
sum_sqr = sum(i * i for i in range(1, n + 1))
print(sum_sqr)
|
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView
from .forms import SignUpForm
# Create your views here.
class NewUserCreationForm(CreateView):
    """Sign-up view: renders SignUpForm and redirects to 'login' on success.

    NOTE(review): despite the 'Form' suffix this is a CreateView (a view,
    not a form); renaming would require updating the URLconf reference.
    """
    template_name = 'registration/register.html'
    form_class = SignUpForm
    success_url = reverse_lazy('login')
|
import unittest
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import testing
import numpy as np
import pytest
from onnx_chainer import export
from onnx_chainer.export import RetainInputHook
from onnx_chainer.testing import input_generator
from tests.helper import ONNXModelTest
@testing.parameterize(
    {'condition': 'tuple'},
    {'condition': 'tuple_with_name', 'input_names': ['x', 'y', 'z']},
    {'condition': 'list', 'in_type': 'list'},
    {'condition': 'list_with_names', 'in_type': 'list',
     'input_names': ['x', 'y', 'z']},
    {'condition': 'var', 'in_type': 'variable'},
    {'condition': 'var_with_names', 'in_type': 'variable',
     'input_names': ['x', 'y', 'z']},
    {'condition': 'varlist', 'in_type': 'variable_list'},
    {'condition': 'varlist_with_names', 'in_type': 'variable_list',
     'input_names': ['x', 'y', 'z']},
    {'condition': 'dict', 'in_type': 'dict'},
    {'condition': 'dict_with_names', 'in_type': 'dict',
     'input_names': {'x': 'in_x', 'y': 'in_y', 'z': 'in_z'}},
    {'condition': 'dict_with_name_list', 'in_type': 'dict',
     'input_names': ['x', 'y', 'z']},
    {'condition': 'vardict', 'in_type': 'variable_dict'},
    {'condition': 'vardict_with_names', 'in_type': 'variable_dict',
     'input_names': {'x': 'in_x', 'y': 'in_y', 'z': 'in_z'}},
)
class TestMultipleInputs(ONNXModelTest):
    """export() must accept a 3-input model with inputs passed as tuple,
    list, Variable(s) or dict, with and without explicit input names."""
    def get_model(self):
        # Small chain mixing a parameterless op (relu) and a link (PReLU).
        class Model(chainer.Chain):
            def __init__(self):
                super(Model, self).__init__()
                with self.init_scope():
                    self.prelu = L.PReLU()
            def __call__(self, x, y, z):
                return F.relu(x) + self.prelu(y) * z
        return Model()
    def get_x(self, in_type=None):
        """Build the three inputs in the container type under test."""
        # Slightly different scales so x, y and z are distinguishable.
        base_x = (input_generator.increasing(1, 5),
                  input_generator.increasing(1, 5)*1.1,
                  input_generator.increasing(1, 5)*1.2)
        names = ['x', 'y', 'z']
        if in_type is None:
            return base_x
        elif in_type == 'list':
            return list(base_x)
        elif in_type == 'variable':
            return tuple(chainer.Variable(v) for v in base_x)
        elif in_type == 'variable_list':
            return [chainer.Variable(v) for v in base_x]
        elif in_type == 'dict':
            return {names[i]: v for i, v in enumerate(base_x)}
        elif in_type == 'variable_dict':
            return {names[i]: chainer.Variable(v)
                    for i, v in enumerate(base_x)}
    def test_multiple_inputs(self):
        model = self.get_model()
        x = self.get_x(getattr(self, 'in_type', None))
        name = 'multipleinputs_' + self.condition
        input_names = getattr(self, 'input_names', None)
        self.expect(model, x, name=name, input_names=input_names)
class TestImplicitInput(ONNXModelTest):
    """Parameters/constants not registered via init_scope must still be
    exported as implicit graph inputs."""
    def test_implicit_param(self):
        class Model(chainer.Chain):
            def __init__(self):
                super(Model, self).__init__()
                # Assigned outside init_scope: picked up implicitly.
                self.frac = chainer.Parameter(np.array(2, dtype=np.float32))
            def forward(self, x):
                return x / self.frac
        x = chainer.Variable(np.array(1, dtype=np.float32))
        self.expect(Model(), x, name='implicit_param')
    def test_implicit_param_ndarray(self):
        class Model(chainer.Chain):
            def __init__(self):
                super(Model, self).__init__()
                # Raw ndarray attribute instead of a chainer.Parameter.
                self.frac = np.array(2, dtype=np.float32)
            def forward(self, x):
                return x / self.frac
        x = chainer.Variable(np.array(1, dtype=np.float32))
        self.expect(Model(), x, name='implicit_param_ndarray')
    def test_implicit_temporary_input(self):
        class Model(chainer.Chain):
            def forward(self, x):
                # Constant created during forward, not held by the chain.
                return x + chainer.Variable(np.array(3, dtype=np.float32))
        x = np.array(5, dtype=np.float32)
        self.expect(Model(), x, name='implicit_temp_input')
    def test_implicit_temporary_input_ndarray(self):
        class Model(chainer.Chain):
            def forward(self, x):
                return x + np.array(3, dtype=np.float32)
        x = np.array(5, dtype=np.float32)
        self.expect(Model(), x, name='implicit_temp_input_ndarray')
class TestRetainInputHook(object):
    """RetainInputHook should record each model input exactly once, both
    for function nodes and for inputs created inside child links."""
    def get_x(self, test_type):
        """Return an input in the requested container/type."""
        if test_type == 'list':
            return [
                chainer.Variable(np.array(3, dtype=np.float32)),
                chainer.Variable(np.array(5, dtype=np.float32))]
        elif test_type == 'dict':
            return {'x': chainer.Variable(np.array(3, dtype=np.float32))}
        elif test_type == 'array':
            return np.array(3, dtype=np.float32)
        else:
            assert test_type == 'variable'
            return chainer.Variable(np.array(3, dtype=np.float32))
    @pytest.mark.parametrize(
        'test_type', ['variable', 'list', 'dict', 'array'])
    def test_hook_for_funcnode(self, test_type):
        class Model(chainer.Chain):
            def forward(self, x):
                # Normalize every container type to a list of Variables.
                if test_type in ['variable', 'array']:
                    x = [chainer.as_variable(x)]
                elif test_type == 'dict':
                    x = list(x.values())
                x.append(chainer.Variable(np.array(7, np.float32)))
                return F.stack(x)
        model = Model()
        x = self.get_x(test_type)
        with RetainInputHook() as h:
            model(x)
        expected_count = 1
        if test_type == 'array':
            # input is ndarray and not checked in forward_preprocess
            expected_count += 1
        assert len(h.retain_inputs) == expected_count
    @pytest.mark.parametrize('test_type', ['array'])
    def test_hook_for_childlink(self, test_type):
        # TODO(disktnk): test_type='variable' is failed
        class ChildModel(chainer.Chain):
            def forward(self, x, h):
                if test_type in ['variable', 'array']:
                    h = [chainer.as_variable(h)]
                elif test_type == 'dict':
                    h = list(h.values())
                h.append(x)
                return F.stack(h)
        class ParentModel(chainer.Chain):
            def __init__(self, get_x):
                super().__init__()
                self.get_x = get_x
                with self.init_scope():
                    self.m = ChildModel()
            def forward(self, x):
                # The child's second input is created inside forward,
                # not supplied by the caller.
                h = self.get_x(test_type)
                return self.m(x, h)
        model = ParentModel(self.get_x)
        x = self.get_x('variable')
        with RetainInputHook() as h:
            model(x)
        assert len(h.retain_inputs) == 1
@testing.parameterize(
    {'use_bn': True, 'out_type': 'dict', 'condition': 'bn_out_dict'},
    {'use_bn': False, 'out_type': 'dict', 'condition': 'out_dict'},
    {'use_bn': True, 'out_type': 'dict', 'condition': 'bn_out_dict_with_name',
     'output_names': {'tanh': 'out_tanh', 'sigmoid': 'out_sigmoid'}},
    {'use_bn': True, 'out_type': 'dict',
     'condition': 'bn_out_dict_with_name_list',
     'output_names': ('out_tanh', 'out_sigmoid')},
    {'use_bn': True, 'out_type': 'tuple', 'condition': 'bn_out_tuple'},
    {'use_bn': True, 'out_type': 'tuple',
     'condition': 'bn_out_tuple_with_name',
     'output_names': ['out_tanh', 'out_sigmoid']},
    {'use_bn': True, 'out_type': 'list', 'condition': 'bn_out_list'},
    {'use_bn': True, 'out_type': 'list', 'condition': 'bn_out_list_with_name',
     'output_names': ['out_tanh', 'out_sigmoid']},
)
class TestMultipleOutput(ONNXModelTest):
    """export() must handle a model returning two outputs as dict, tuple
    or list, optionally with caller-supplied output names."""
    def get_model(self, use_bn=False, out_type=None):
        class Model(chainer.Chain):
            def __init__(self, use_bn=False, out_type=None):
                super(Model, self).__init__()
                self._use_bn = use_bn
                self._out_type = out_type
                with self.init_scope():
                    self.conv = L.Convolution2D(None, 32, ksize=3, stride=1)
                    if self._use_bn:
                        self.bn = L.BatchNormalization(32)
            def __call__(self, x):
                h = self.conv(x)
                if self._use_bn:
                    h = self.bn(h)
                # Two heads over the same feature map.
                o1 = F.tanh(h)
                o2 = F.sigmoid(h)
                if self._out_type == 'dict':
                    return {
                        'tanh': o1,
                        'sigmoid': o2
                    }
                elif self._out_type == 'tuple':
                    return o1, o2
                elif self._out_type == 'list':
                    return [o1, o2]
        return Model(use_bn=use_bn, out_type=out_type)
    def test_multiple_outputs(self):
        model = self.get_model(use_bn=self.use_bn, out_type=self.out_type)
        x = np.zeros((1, 3, 32, 32), dtype=np.float32)
        name = 'multipleoutput_' + self.condition
        output_names = getattr(self, 'output_names', None)
        self.expect(model, x, name=name, output_names=output_names)
class TestIntermediateOutput(ONNXModelTest):
    """A returned intermediate activation (y) must become a graph output
    alongside the final result (z)."""
    def get_model(self):
        class Model(chainer.Chain):
            def __init__(self):
                super().__init__()
                with self.init_scope():
                    self.l1 = L.Linear(4)
                    self.l2 = L.Linear(5, initial_bias=0.1)
            def __call__(self, x):
                y = self.l1(x)
                z = self.l2(y)
                # Return both the intermediate and the final value.
                return y, z
        return Model()
    def test_outputs(self):
        model = self.get_model()
        x = np.ones((1, 3), dtype=np.float32)
        self.expect(model, x, output_names=['y', 'z'])
@testing.parameterize(
    {'out_kind': 'var'},
    {'out_kind': 'array'},
    {'out_kind': 'array_in_tuple'},
    {'out_kind': 'list_in_tuple'},
)
class TestOutputTypeCheck(unittest.TestCase):
    """export() must accept Variable outputs and reject raw ndarrays or
    nested lists with a descriptive error message."""
    def test_output_type_check(self):
        class Model(chainer.Chain):
            def __init__(self, out_kind):
                super().__init__()
                self.out_kind = out_kind
            def __call__(self, x):
                if self.out_kind == 'array':
                    return x.array
                elif self.out_kind == 'array_in_tuple':
                    return x, x.array
                elif self.out_kind == 'list_in_tuple':
                    return ([x]),
                else:
                    assert self.out_kind == 'var'
                    return x
        model = Model(self.out_kind)
        x = np.ones((1, 3, 4, 5), dtype=np.float32)
        if self.out_kind == 'var':
            export(model, (x,))  # should be no error
        elif self.out_kind == 'array':
            with self.assertRaises(RuntimeError) as e:
                export(model, (x,))
            # Fix: the original `'phrase'.find(e.exception.args[0])` was
            # vacuous — str.find returns -1 (truthy) when absent and the
            # arguments were reversed. Check substring membership instead.
            assert 'Unexpected output type' in e.exception.args[0]
        else:
            with self.assertRaises(ValueError) as e:
                export(model, (x,))
            assert 'must be Chainer Variable' in e.exception.args[0]
class TestUnusedLink(ONNXModelTest):
    # When some links are under init scope but not used on forwarding, params
    # of the links are not initialized. This means exporter cannot convert them
    # to ONNX's tensor because of lack of shape etc.
    def test_outputs(self):
        """Exporting a model with an uninitialized (unused) link should
        succeed while emitting a UserWarning."""
        class MLP(chainer.Chain):
            def __init__(self, n_units, n_out):
                super(MLP, self).__init__()
                with self.init_scope():
                    self.l1 = L.Linear(None, n_units)
                    self.l2 = L.Linear(None, n_units)
                    self.l3 = L.Linear(None, n_out)
            def __call__(self, x):
                h1 = F.relu(self.l1(x))
                # Unused for some reason, then params are not initialized.
                # h2 = F.relu(self.l2(h1))
                return self.l3(h1)
        model = MLP(100, 10)
        x = np.random.rand(1, 768).astype(np.float32)
        with testing.assert_warns(UserWarning):
            self.expect(model, x)
@testing.parameterize(
    {
        'x_shape': (10, 3, 28, 28), 'shape_option': ('b', 3, 28, 28),
    },
    {
        'x_shape': (10, 3, 28, 28),
        'shape_option': [('b', 3, 28, 28)],
        'condition': 'var_list'
    },
    {
        'x_shape': [(10, 3, 28, 28), (8, 3, 28, 28)],
        'shape_option': [('b', 3, 28, 28), ('b', 3, 28, 28)],
        'condition': 'list_list'
    },
    {
        'x_shape': {'1': (10, 3, 28, 28), '2': (8, 3, 28, 28)},
        'shape_option': {'2': ('b', 3, 28, 28), '1': ('b', 3, 28, 28)},
        'condition': 'dict_dict'
    },
    {
        'x_shape': {'1': (10, 3, 28, 28), '2': (8, 3, 28, 28)},
        'shape_option': [('b', 3, 28, 28), ('b', 3, 28, 28)],
        'condition': 'dict_list'
    },
)
class TestCustomizedInputShape(ONNXModelTest):
    """input_shapes= lets the caller replace the batch dimension with a
    symbolic name ('b') in the exported graph."""
    def test_output(self):
        class Model(chainer.Chain):
            def __init__(self):
                super().__init__()
                with self.init_scope():
                    self.l1 = L.Convolution2D(None, 16, 5, 1, 2)
                    self.l2 = L.Convolution2D(16, 8, 5, 1, 2)
            def forward(self, *xs, **kwxs):
                # Accepts positional or keyword inputs; multiple inputs are
                # vstacked into one batch before the conv layers.
                if kwxs:
                    h = F.vstack(list(kwxs.values()))
                elif len(xs) > 1:
                    h = F.vstack(xs)
                else:
                    h = xs[0]
                h2 = self.l1(h)
                h3 = F.relu(h2)
                h4 = self.l2(h3)
                return F.relu(h4)
        def check_input_shape(onnx_model, path):
            # NOTE(review): both asserts are vacuous — a non-empty list of
            # bools is always truthy, and `dim[0] == 'b'` compares a
            # TensorShapeProto.Dimension to a str (always False). The
            # intended check is probably
            # all(v.type.tensor_type.shape.dim[0].dim_param == 'b' ...);
            # confirm outputs also carry the symbolic dim before fixing.
            assert [v.type.tensor_type.shape.dim[0] == 'b' for
                    v in onnx_model.graph.input]
            assert [v.type.tensor_type.shape.dim[0] == 'b' for
                    v in onnx_model.graph.output]
        # Build xs in the same container type as x_shape.
        if isinstance(self.x_shape, tuple):
            xs = np.zeros(self.x_shape, dtype=np.float32)
        elif isinstance(self.x_shape, list):
            xs = tuple(
                np.zeros(shape, dtype=np.float32) for shape in self.x_shape)
        else:
            assert isinstance(self.x_shape, dict)
            xs = {k: np.zeros(shape, dtype=np.float32) for
                  k, shape in self.x_shape.items()}
        name = 'customized_input_shape'
        if hasattr(self, 'condition'):
            name += '_{}'.format(self.condition)
        self.expect(
            Model(), xs, name=name, input_shapes=self.shape_option,
            custom_model_test_func=check_input_shape)
@pytest.mark.parametrize('x_shape,shape_option', [
    ((10, 5), '?'),  # not tuple
    ((10, 5), ('?', 5, 5)),  # shape length error
    ((10, 5), [('?', 5), ('?', 5)]),  # not single
    ([(10, 5), (10, 5)], [('?', 5), ('?', 5), ('?', 5)]),  # list length error
    ([(10, 5), (10, 5)], [('?', 5), ('?', 5, 5)]),  # shape length error
    ({'a': (10, 5), 'b': (10, 5)}, {'a': ('?', 5), 'c': ('?', 5)}),  # NOQA not key found
    ({'a': (10, 5), 'b': (10, 5)}, [('?', 5), ('?', 5), ('?', 5)]),  # NOQA list length error
    ({'a': (10, 5), 'b': (10, 5)}, {'a': ('?', 5), 'b': ('?', 5, 5)}),  # NOQA not key found
])
def test_invalid_customized_input_shape(x_shape, shape_option):
    """Malformed input_shapes options must raise ValueError on export."""
    model = chainer.Sequential(F.relu)
    # Build xs in the same container type as x_shape.
    if isinstance(x_shape, tuple):
        xs = np.zeros(x_shape, dtype=np.float32)
    elif isinstance(x_shape, list):
        xs = tuple(
            np.zeros(shape, dtype=np.float32) for shape in x_shape)
    else:
        assert isinstance(x_shape, dict)
        xs = {k: np.zeros(shape, dtype=np.float32) for
              k, shape in x_shape.items()}
    with pytest.raises(ValueError):
        export(model, xs, input_shapes=shape_option)
|
from __future__ import print_function
from aes_cbc_cipher import AES_CBC_Cipher
import argparse
import base64
def analyze(token):
    """Print the byte-level breakdown (IV + message) of a base64 CBC token.

    Fix: indexing the decoded bytes yields ints on Python 3, so the old
    `'0x' + base64.b16encode(CT[i])` raised TypeError. A bytearray gives
    uniform int elements on both Python 2 and 3; bytes are formatted with
    %02X (same uppercase hex as b16encode). IV/MSG now hold ints.
    """
    print('Original token:' + token)
    CT = bytearray(base64.b64decode(token))
    IV = []
    MSG = []
    # print the base 16 breakdown of the CT
    print('Cipher text: [ ', end='')
    for i, byte in enumerate(CT):
        if i < 16:
            # First AES block is the initialization vector.
            IV.append(byte)
        else:
            MSG.append(byte)
        print('0x%02X ' % byte, end='')
    print(']')
    print('IV:' + str(IV))
    print('MSG:' + str(MSG))
def read_input(in_file):
    """Return the stripped text contents of `in_file`, or '' on failure.

    Fixes: the file handle is now closed via `with`; the bare `except`
    is narrowed to OS errors so programming errors are not swallowed;
    text mode replaces 'rb' so `rstrip` works on str under Python 3.
    """
    data = ''
    try:
        with open(in_file, 'r') as f:
            data = f.read().rstrip()
    except OSError:
        print('Unable to read file "' + in_file + '"')
    return data
def main():
    """Parse the token-path argument, load the file, analyze valid tokens."""
    parser = argparse.ArgumentParser(description='Enter a token to analyze')
    parser.add_argument('token_path', type=str, help='the path to a valid token to analyze')
    args = parser.parse_args()
    data = read_input(args.token_path)
    # A base64-encoded 32-byte token (16-byte IV + one block) is 44 chars.
    if data != '' and len(data) == 44:
        analyze(data)
    else:
        print('file doesn\'t appear to be a valid token')
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
import time
import threading
import requests
import sys
if sys.version_info < (3, 0):
import Queue as queue
else:
import queue
class MessageBus(object):
    """Background sender that POSTs queued payloads to a local Kalliope API.

    Messages are enqueued with put(); a daemon thread drains the queue and,
    when several are pending, only the most recent one is posted.
    """
    def __init__(self):
        self.session = requests.Session()
        self.queue = queue.Queue()
        self.is_quit = False
        self.url = 'http://localhost:8080/kalliope'
    def put(self, payload, notification='KALLIOPE'):
        """Queue a (payload, notification) pair for the worker thread."""
        self.queue.put((payload, notification))
    def start(self):
        """Spawn the daemon worker thread."""
        self.is_quit = False
        self.thread = threading.Thread(target=self._run)
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        """Signal shutdown; the empty tuple only wakes the blocked get()."""
        self.is_quit = True
        self.queue.put(('', ''))
    def _run(self):
        while not self.is_quit:
            payload, notification = self.queue.get()
            # Drain any backlog: only the newest pending message is sent.
            while self.queue.qsize():
                payload, notification = self.queue.get()
            if self.is_quit:
                # Fix: previously the ('', '') stop-sentinel itself was
                # POSTed to the server before the loop condition was
                # re-checked; bail out without sending it.
                break
            data = {
                "notification": notification,
                "payload": payload
            }
            self.session.post(self.url, data=data)
if __name__ == '__main__':
    # Smoke test: post two messages, giving the worker time to send each.
    bus = MessageBus()
    bus.start()
    for text in ('hello world', '你好帅!'):
        bus.put(text)
        time.sleep(3)
    bus.stop()
|
#! /usr/bin/python3
import base64
def main():
    """Interactively base64-decode passwords until the user answers 'no'.

    Fixes: the recursive self-call is replaced with a loop (no recursion
    depth growth); the bare `exit` statement was a no-op name reference;
    the decoded bytes are now decoded to text instead of printing the
    Python 3 b'...' repr.
    """
    while True:
        password = input("What Is The Password You Would Like To Decrypt? ")
        decrypt = base64.b64decode(password)
        print("Decrypted Password: " + decrypt.decode("utf-8", "replace"))
        welcome = input("Do You Have Another Password To Decrypt? [Yes|No]")
        if welcome.lower() != "yes":
            print("Thank You!")
            break
if __name__ == "__main__":
    main()
|
def recorrido_del_caballo(A, x, y, i):
    """Backtracking knight's tour on board A (0 marks an unvisited square).

    Marks (x, y) as step i+1, then recurses over the legal (unvisited)
    moves returned by obtener_movimientos; undoes the mark and returns
    False when the branch fails.

    Fixes: Python booleans are True/False (`true`/`false` raised
    NameError); legal moves are the *unvisited* squares (`== 0`, not
    `!= 0`, matching the original "los no repetidos" comment); the mark
    is now also undone when every recursive continuation fails, so
    backtracking actually unwinds.
    """
    A[x][y] = i + 1
    movimientos = obtener_movimientos(x, y)
    # Unvisited squares only.
    movimientos_legales = [mov for mov in movimientos if A[mov.x][mov.y] == 0]
    if not movimientos_legales:
        if i == 63:
            # All 64 squares visited: success.
            return True
        # Dead end: undo this step so the caller can try alternatives.
        A[x][y] = 0
        return False
    for mov in movimientos_legales:
        if recorrido_del_caballo(A, mov.x, mov.y, i + 1):
            return True  # first successful continuation wins
    A[x][y] = 0  # no continuation worked from here — backtrack
    return False
|
#top down approach
def lcs_topDown(x, y, lenx, leny):
    """Length of the longest common subsequence of x[:lenx] and y[:leny].

    (Despite the name, this is the bottom-up DP.) Fixes the original
    recurrence: row/column 0 fell through into the match test and read
    x[-1]/y[-1], and a mismatch reset the cell to 0 instead of carrying
    max(left, up) — which does not compute the LCS.
    """
    table = [[0] * (leny + 1) for _ in range(lenx + 1)]
    for i in range(1, lenx + 1):
        for j in range(1, leny + 1):
            if x[i - 1] == y[j - 1]:
                table[i][j] = 1 + table[i - 1][j - 1]
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[lenx][leny]  # result is in final cell
import numpy as np
# Demo: show the placeholder table's dimensions, then run the DP on the
# classic example (expected LCS "GTAB", length 4).
X = "AGGTAB"
Y = "GXTXAB"
array = [[-1]*(len(Y)+1) for i in range(len(X)+1)]
print(array, np.array(array).shape)
# print() call: the Python-2 print statement was a SyntaxError on Python 3.
print("Length of LCS is ", lcs_topDown(X, Y, len(X), len(Y)))
|
import pickle
from file_manager import FileManager
from config import Config
from message import Message
from torrent import Torrent
from htpbs import ProgressBars, Work
import time # required for demonstration purposes only
class Uploader:
    """Seeder side of a peer connection: answers pickled block requests
    from a downloader with 'piece' messages read through FileManager.

    NOTE(review): `work` appears to be demo scaffolding (time.sleep-based
    progress) kept for reference; `run` is the live serve loop.
    """
    def __init__(self, peer_id, server, peer_uploader, address, torrent):
        # peer_uploader is the connected socket used by send()/receive().
        self.peer_id = peer_id
        self.config = Config()
        self.torrent = torrent
        self.file_manager = FileManager(torrent, peer_id)
        self.peer_uploader = peer_uploader
        self.server = server
        self.address = address
        # NOTE(review): this overwrites the peer_id argument stored above
        # with -1, discarding it — confirm which value is intended.
        self.peer_id = -1
        self.uploaded = 0  # bytes
        self.downloaded = 0  # bytes
        self.message = Message()  # Send message
        #### implement this ####
        self.uploader_bitfield = None
        self.downloader_bitfield = None
    def send(self, data):
        """Pickle `data` and write it to the peer socket."""
        serialized_data = pickle.dumps(data)
        self.peer_uploader.send(serialized_data)
    def receive(self, max_alloc_mem=4096):
        """Read one pickled message from the peer socket and return it.

        NOTE(review): assumes a whole pickled message arrives in a single
        recv() of max_alloc_mem bytes — verify for larger blocks.
        """
        serialized_data = self.peer_uploader.recv(max_alloc_mem)
        data = pickle.loads(serialized_data)
        return data
    def work(self, progressbars, bar_index, work_value, work_name, uploadeder):
        """
        Demo worker: drives one progress bar while echoing received data.

        :param progressbars: the ProgressBars object
        :param bar_index: an integer index of the bar to update
        :param work_value: a value for time.sleep() to simulate different progress bars rates
        :param work_name: the name of the work (bar prefix)
        :param uploadeder: the Uploader whose socket is serviced
        :return: VOID
        """
        progressbars.set_bar_prefix(bar_index=bar_index, prefix=work_name)
        progressbars.set_bar_suffix(bar_index=bar_index, suffix=" >")
        for i in range(101):
            # your work here. we use the time.sleep() as example
            # Real work could be downloading a file and show progress
            time.sleep(work_value)
            data = uploadeder.receive()
            # block = uploadeder.file_manager.get_block(data['index'], data['begin'], uploadeder.torrent.block_size(), uploadeder.file_manager.path_to_original_file)
            # package = uploadeder.message.piece
            # package['index'] = data['index']
            # package['begin'] = data['begin']
            # package['block'] = block
            uploadeder.send(i)
            progressbars.update(bar_index=bar_index, value=i)
        progressbars.finish()
    def run(self):
        """Serve loop: answer 8 block requests per piece, forever,
        updating a single progress bar per piece."""
        self.file_manager.set_path_to_original_file("age.txt")
        # block = self.file_manager.get_block(0, 0, self.torrent.block_size(), self.file_manager.path_to_original_file)
        # package = self.message.piece
        # package['index'] = 0
        # package['begin'] = 0
        # package['block'] = block
        #self.send(package)
        # start with 3 bars
        progressbars = ProgressBars(num_bars=1)
        # set bar #3 to be the total progress
        #progressbars.set_last_bar_as_total_progress(prefix="Total: ")
        # start all the threaded works
        # Work.start(self.work, (progressbars, 0, 0.1, "<Piece 0 : ", self))
        #Work.start(self.work, (progressbars, 1, 0.01, "w2: "))
        index = 0
        print("running uploader")
        while True:
            progressbars.set_bar_prefix(bar_index=0, prefix="<Piece " + str(index) + " :")
            w = 12.5
            # 8 blocks per piece; each request carries its index/begin offset.
            for i in range(8):
                #time.sleep(0.1)
                data = self.receive()
                block = self.file_manager.get_block(data['index'], data['begin'], self.torrent.block_size(), self.file_manager.path_to_original_file)
                package = self.message.piece
                package['index'] = data['index']
                package['begin'] = data['begin']
                package['block'] = block
                #print(package)
                self.send(package)
                # NOTE(review): w is advanced before update, so the bar runs
                # 25..112.5 instead of 12.5..100 — confirm intended.
                w += 12.5
                progressbars.update(bar_index=0, value=w)
            # progressbars.finish()
            index += 1
        """
        while True:
            data = self.receive()
            print(data)
            block = self.file_manager.get_block(data['index'], data['begin'], self.torrent.block_size(), self.file_manager.path_to_original_file)
            package = self.message.piece
            package['index'] = data['index']
            package['begin'] = data['begin']
            package['block'] = block
            self.send(package)
        """
|
import pandas as pd
from visual import visual
from gain_data import to_download_data_path
from machine_learning.predict_ding_di import combine_interval_points
from data_interface.data_interface import gain_code_data_excel
# just two days
# def get_ding(input_data):
# one, two, three = input_data # (input_data[0], input_data[1], input_data[2])
# cond_1 = one["high"] > two["high"]
# if not cond_1:
# return False
# cond_2 = two["high"] < one["low"]
# if not cond_2:
# return False
# return True
#
#
# def get_di(input_data):
# one, two, three = input_data # (input_data[0], input_data[1], input_data[2])
# cond_1 = one["high"] < two["high"]
# if not cond_1:
# return False
# cond_2 = two["low"] > one["high"]
# if not cond_2:
# return False
# return True
def get_ding(input_data):
    """Top ('ding') over 3 bars: highs strictly fall and at least one later
    bar's high gaps below the first bar's low."""
    first, second, third = input_data
    if not (first["high"] > second["high"] > third["high"]):
        return False
    if second["high"] < first["low"] or third["high"] < first["low"]:
        return True
    return False
def get_di(input_data):
    """Bottom ('di') over 3 bars: lows strictly rise and at least one later
    bar's low gaps above the first bar's high."""
    first, second, third = input_data
    if not (first["low"] < second["low"] < third["low"]):
        return False
    if second["low"] > first["high"] or third["low"] > first["high"]:
        return True
    return False
def get_ding_v2(input_data):
    """Top ('ding') v2: the middle bar's high AND low both exceed those of
    its two neighbors (a local swing high)."""
    first, second, third = input_data
    if not (second["high"] > first["high"] and second["high"] > third["high"]):
        return False
    if second["low"] > first["low"] and second["low"] > third["low"]:
        return True
    return False
def get_di_v2(input_data):
    """Bottom ('di') v2: the middle bar's low AND high are both below those
    of its two neighbors (a local swing low)."""
    first, second, third = input_data
    if not (second["low"] < first["low"] and second["low"] < third["low"]):
        return False
    if second["high"] < first["high"] and second["high"] < third["high"]:
        return True
    return False
def get_ding_four(input_data):
    """4-bar top: highs strictly fall across all four bars and some later
    bar's high gaps below the first bar's low."""
    first, second, third, fourth = input_data
    if not (first["high"] > second["high"] > third["high"] > fourth["high"]):
        return False
    below_first_low = (second["high"] < first["low"]
                       or third["high"] < first["low"]
                       or fourth["high"] < first["low"])
    if below_first_low:
        return True
    return False
def get_di_four(input_data):
    """4-bar bottom: lows strictly rise across all four bars and some later
    bar's low gaps above the first bar's high."""
    first, second, third, fourth = input_data
    if not (first["low"] < second["low"] < third["low"] < fourth["low"]):
        return False
    above_first_high = (second["low"] > first["high"]
                        or third["low"] > first["high"]
                        or fourth["low"] > first["high"])
    if above_first_high:
        return True
    return False
def get_ding_five(input_data):
    """5-bar top: highs strictly fall across all five bars and some later
    bar's high gaps below the first bar's low."""
    first, second, third, fourth, fifth = input_data
    if not (first["high"] > second["high"] > third["high"]
            > fourth["high"] > fifth["high"]):
        return False
    for later in (second, third, fourth, fifth):
        if later["high"] < first["low"]:
            return True
    return False
def get_di_five(input_data):
    """5-bar bottom: lows strictly rise across all five bars and some later
    bar's low gaps above the first bar's high."""
    first, second, third, fourth, fifth = input_data
    if not (first["low"] < second["low"] < third["low"]
            < fourth["low"] < fifth["low"]):
        return False
    for later in (second, third, fourth, fifth):
        if later["low"] > first["high"]:
            return True
    return False
def get_advance_ding(input_data):
    """Advanced 4-bar top: bars 2-4 trend down (both highs and lows) while
    bar 1 sits inside that fall — its high below bars 2/3/4 highs but its
    low above bars 2/3 lows."""
    one, two, three, four = input_data
    conditions = (
        two["high"] > three["high"] and one["high"] < two["high"],
        one["low"] > two["low"] and one["low"] > three["low"],
        two["high"] > three["high"] > four["high"],
        two["low"] > three["low"] > four["low"],
        one["high"] < three["high"] and one["high"] < four["high"],
    )
    if all(conditions):
        return True
    return False
def get_advance_di(input_data):
    """Advanced 4-bar bottom: bars 2-4 trend up (both highs and lows) while
    bar 1 sits inside that rise — its low above bars 2/3/4 lows but its
    high below bars 2/3 highs."""
    one, two, three, four = input_data
    conditions = (
        two["low"] < three["low"] and two["low"] < one["low"],
        one["high"] < two["high"] and one["high"] < three["high"],
        two["high"] < three["high"] < four["high"],
        two["low"] < three["low"] < four["low"],
        one["low"] > three["low"] and one["low"] > four["low"],
    )
    if all(conditions):
        return True
    return False
def run_functions(functions, data, adjustment=None):
    """Slide every detector in `functions` over `data` and collect hits.

    `functions` maps name -> {"fun": callable, "sliding_window": int};
    each callable receives a list of consecutive rows. Returns a list of
    {"index": int, "type_is": name-or-name-list} dicts. With
    adjustment=True the reported index is the last bar of the widest
    window (lag correction) instead of the first.
    """
    data.index = range(len(data))
    widest = max(functions[name]["sliding_window"] for name in functions)
    hits = []
    for start in range(len(data) - widest + 1):
        matched = []
        for fun_name, spec in functions.items():
            window = [data.loc[start + offset]
                      for offset in range(spec["sliding_window"])]
            if spec["fun"](window):
                matched.append(fun_name)
        if not matched:
            continue
        label = matched[0] if len(matched) == 1 else matched
        if adjustment is True:
            # Correct for the detector's lag.
            hits.append({"index": start + widest - 1, "type_is": label})
        else:
            hits.append({"index": start, "type_is": label})
    return hits
#######################################################################################################################
# make it pretty #################################################################################################
#######################################################################################################################
def link_between_ding_di(data, fun_result):
    """Linearly interpolate the 'open' price between consecutive hits.

    For each adjacent pair of detections, emits one {"point", "index"}
    dict per bar from the first hit up to (excluding) the second.
    Returns [] when there are fewer than two detections.
    """
    if not fun_result:
        return []
    segments = []
    for left, right in zip(fun_result, fun_result[1:]):
        start_index = left["index"]
        end_index = right["index"]
        steps = end_index - start_index
        start_open = data.loc[start_index]["open"]
        slope = (data.loc[end_index]["open"] - start_open) / steps
        segments.extend(
            {"point": start_open + slope * k, "index": start_index + k}
            for k in range(steps))
    return segments
def merge_result_back_to_data(data, linked_result, fun_result):
    """Write interpolated prices ("point") and labels ("ptype") onto `data`.

    Bars without a detection fall back to their own open price and the
    neutral label "in_trend". Mutates and returns `data`.
    """
    if len(fun_result) == 0:
        data["point"] = data["open"]
        data["ptype"] = "in_trend"
        return data
    linked = pd.DataFrame(linked_result)
    linked.index = linked["index"]
    detected = pd.DataFrame(fun_result)
    detected.index = detected["index"]
    # Index-aligned assignment leaves NaN on bars without a hit...
    data["point"] = linked["point"]
    data["ptype"] = detected["type_is"]
    # ...which we then fill with the raw open price / neutral label.
    data["point"] = data["point"].fillna(data["open"])
    data["ptype"] = data["ptype"].fillna("in_trend")
    return data
def generate_ding_di(code, combine_switch=True, adjustment=None):
    """Run the 3-bar ding/di detectors over the data for `code` and return
    the visualised result.

    combine_switch merges clustered detections down to the first one;
    adjustment is forwarded to run_functions (lag correction).
    """
    detectors = {"ding": {"fun": get_ding, "sliding_window": 3},
                 "di": {"fun": get_di, "sliding_window": 3}}
    data = gain_code_data_excel(code)
    data.index = range(len(data))
    hits = run_functions(detectors, data, adjustment)
    if combine_switch:
        hits = combine_interval_points(hits, data, just_first_one=True)
    linked = link_between_ding_di(data, hits)
    merged = merge_result_back_to_data(data, linked, hits)
    return visual(merged)
if __name__ == '__main__':
    # Detector configuration; the v2 / four / five / advance variants with
    # their matching sliding_window sizes can be swapped in here.
    this_functions = {"ding": {"fun": get_ding, "sliding_window": 3},
                      "di": {"fun": get_di, "sliding_window": 3}}
    # Other candidate codes: 600036, 600298, 000858, 600999, 000527,
    # 600717, 600030.
    this_code = "000596"
    data_path = "trade/data/code" + this_code + ".xlsx"
    try:
        this_data = pd.read_excel(data_path)
    except FileNotFoundError:
        # Fetch the data on first use, then retry the read.
        to_download_data_path(this_code, "trade/data/")
        this_data = pd.read_excel(data_path)
    this_data.index = range(len(this_data))
    this_fun_result = run_functions(this_functions, this_data)
    this_fun_result = combine_interval_points(this_fun_result, this_data, just_first_one=True)
    this_linked_result = link_between_ding_di(this_data, this_fun_result)
    a = merge_result_back_to_data(this_data, this_linked_result, this_fun_result)
    a = visual(a)
    with open("result.json", mode="w") as a_file:
        a_file.write(a)
|
import rosbag
import json
import datetime
import glob
import re, time
def convert_bag2txt(filename, output_dir, topics_list):
    """Split one rosbag's messages into per-app text files.

    Writes four files into output_dir, keyed by the participant id parsed
    from the bag filename:
      bag_free_*     -> FreeExplorationApp
      bag_spatial_*  -> SpatialSkillAssessmentApp
      bag_tangram_*  -> TangramMindsetApp
      bag_mindset_*  -> mindset_assessment_app
    Fixes: the four output files (and the bag) are now closed even on
    error via with/finally; keyword tables are built once instead of per
    message; Python-2 `print` statement replaced with print().
    """
    # Keyword tables used to classify which app a message belongs to.
    mindset_app_keywords = ['buffy', 'fluffy']
    spatial_app_keywords = ["SpatialSkillAssessmentApp", "_A'", "_B'", "_C'", "_D'"]
    free_app_keywords = ["FreeExplorationApp", 'babyseal', 'snowman1', 'penguin', 'kid4', 'cloud', 'dragon', 'dinosaur', 'rabbit', 'bird', 'princess']
    start_app_keywords = ["start_button_pre", "start_button_post"]
    ignore_keywords = ["subject_id"]
    # Split on '_' and '.' so the uid is separated from the .bag extension.
    file_info = re.split('[_|.]', filename)
    pID = file_info[8].lower()  # participant id position fixed by the naming scheme
    print(pID)
    currentApp = ""
    bag = rosbag.Bag(filename)
    try:
        with open(output_dir + '/bag_spatial_' + pID + '.txt', 'w') as f_spatial, \
                open(output_dir + '/bag_free_' + pID + '.txt', 'w') as f_free, \
                open(output_dir + '/bag_mindset_' + pID + '.txt', 'w') as f_mindset, \
                open(output_dir + '/bag_tangram_' + pID + '.txt', 'w') as f_tangram:
            app_files = {
                'SpatialSkillAssessmentApp': f_spatial,
                'FreeExplorationApp': f_free,
                'mindset_assessment_app': f_mindset,
                'TangramMindsetApp': f_tangram,
            }
            for topic, msg, t in bag.read_messages(topics=topics_list):
                text = str(msg)
                if any(x in text for x in ignore_keywords):
                    continue
                if any(x in text for x in start_app_keywords):
                    # Start-button events belong to whatever app is current;
                    # anything else (incl. tangram) goes to the mindset file,
                    # matching the original routing.
                    if currentApp == 'SpatialSkillAssessmentApp':
                        f_spatial.write(text + '\n')
                    elif currentApp == 'FreeExplorationApp':
                        f_free.write(text + '\n')
                    else:
                        f_mindset.write(text + '\n')
                else:
                    # Re-detect the current app from message keywords;
                    # unmatched messages default to TangramMindsetApp.
                    if any(x in text for x in mindset_app_keywords):
                        currentApp = 'mindset_assessment_app'
                    elif any(x in text for x in spatial_app_keywords):
                        currentApp = 'SpatialSkillAssessmentApp'
                    elif any(x in text for x in free_app_keywords):
                        currentApp = 'FreeExplorationApp'
                    else:
                        currentApp = 'TangramMindsetApp'
                    app_files[currentApp].write(text + '\n')
    finally:
        bag.close()
def read_spatial_skill(topic,msg,strDate,f_spatial_csv):
    """Parse one spatial-skill log message into a dict (CSV write disabled).

    The payload is a Python-repr-like string; the replace() sequence turns
    single-quoted keys into double quotes so json.loads can parse it, using
    'XXX' as a scratch token to preserve embedded double quotes.
    NOTE(review): the parsed fields are currently unused — the final CSV
    write is commented out.
    """
    #print("goren",msg.data)
    raw_str = str(msg.data)
    # Strip Python-2 unicode prefixes like u'...'
    raw_str = raw_str.replace("u'","'")
    #print("1",raw_str)
    raw_str = raw_str.replace('"','XXX')
    #print("2",raw_str)
    raw_str = raw_str.replace("'",'"')
    #print("3",raw_str)
    raw_str = raw_str.replace('XXX', "'")
    #print ("4",raw_str)
    #raw_str = raw_str.encode('utf-8')
    raw_str = raw_str.encode('ascii','ignore')
    raw_dict = json.loads(raw_str)
    action = raw_dict['action']
    comment = raw_dict['comment']
    obj = raw_dict['obj']
    # NOTE(review): this local name shadows the imported `time` module.
    time = raw_dict['time']
    #print(action)
    #if (action=='down'):
    #comment=
    #f_spatial_csv.write(action+","+comment+","+obj+","+strDate+'\n')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# convert bag files
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Convert every *.bag file found in the rosbag directory into per-app text
# files under output_dir (only the /log topic is extracted).
# Renamed `dir` -> `bag_dir` so the builtin dir() is no longer shadowed.
bag_dir = "../NIH_grit_pilot_rosbag/"
#bag_dir = "./bags"
output_dir = "results/txt/"
#output_dir = "processed_data/txt/"
bag_files = glob.glob(bag_dir + "/*.bag")
for bag_file in bag_files:
    convert_bag2txt(bag_file, output_dir, topics_list=['/log'])
|
import torch
import pickle
import gym_super_mario_bros
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from modules.image_tools import get_env_fix
from modules.agents_DQN import DQNAgent
def get_max_episode(reward_list):
    """Return the best reward (as int) and the 1-based episode it occurred in.

    The previous implementation truncated ``max()`` to int *before* calling
    ``list.index()``, which raises ValueError whenever rewards are floats
    (e.g. max 2.7 -> int 2, and 2 is not in the list). The index of the
    maximum is now located first, so float rewards work correctly; integer
    reward lists behave exactly as before.

    Raises ValueError on an empty list (same as the original max([])).
    """
    max_episode = max(range(len(reward_list)), key=reward_list.__getitem__)
    return {"max_reward": int(reward_list[max_episode]),
            "max_episode": max_episode + 1}
def deploy(num_episodes, training_mode, pretrained, mario_world_level):
    """Train or run a (double) DQN agent on one Super Mario Bros level.

    num_episodes      -- number of episodes to play
    training_mode     -- True: learn (no rendering); False: render only
    pretrained        -- load previously saved weights / replay memory
    mario_world_level -- level id such as "1-1", used for env name and paths
    """
    # All checkpoints and replay-memory files share this filename prefix.
    path_training_info = "training_data/"+mario_world_level+"_"
    env = gym_super_mario_bros.make('SuperMarioBros-'+mario_world_level+'-v0')
    env = get_env_fix(env) # Wraps the environment so that frames are grayscale
    observation_space = env.observation_space.shape
    action_space = env.action_space.n
    agent = DQNAgent(state_space=observation_space,
                     action_space=action_space,
                     max_memory_size=30000,
                     batch_size=32,
                     gamma=0.90,
                     lr=0.00025,
                     dropout=0.,
                     exploration_max=1.0,
                     exploration_min=0.02,
                     exploration_decay=0.99,
                     double_dq=True,
                     pretrained=pretrained,
                     path_level=path_training_info)
    # Smoothing-window size for the reward plot, scaled to the episode budget.
    graph_prop = int((num_episodes/10000)*500)
    env.reset()
    total_rewards = []
    for ep_num in tqdm(range(num_episodes)):
        state = env.reset()
        state = torch.Tensor([state])  # add a leading batch dimension
        total_reward = 0
        steps = 0
        while True:
            if not training_mode:
                env.render()
            action = agent.act(state)
            steps += 1
            state_next, reward, terminal, info = env.step(int(action[0]))
            total_reward += reward
            state_next = torch.Tensor([state_next])
            # Wrap scalars as (1, 1) tensors for the replay buffer.
            reward = torch.tensor([reward]).unsqueeze(0)
            terminal = torch.tensor([int(terminal)]).unsqueeze(0)
            if training_mode:
                agent.remember(state, action, reward, state_next, terminal)
                agent.experience_replay()
            state = state_next
            if terminal:
                break
        total_rewards.append(total_reward)
        # Progress report every 10 episodes.
        if (ep_num + 1) % 10 == 0:
            max_ep_rew = get_max_episode(total_rewards)
            print(" MAX Reward ===>",max_ep_rew["max_reward"],
                  "in episode",max_ep_rew["max_episode"])
        # NOTE(review): mutating the argument here does not extend the loop
        # (range() was already built); it only inflates the final
        # `num_episodes > graph_prop` plot check — confirm this is intended.
        num_episodes += 1
    if training_mode:
        # Persist replay-memory bookkeeping, rewards, network weights and
        # the raw experience tensors so training can resume later.
        print("\nSaving data ......")
        with open(path_training_info+"ending_position.pkl", "wb") as f:
            pickle.dump(agent.ending_position, f)
        with open(path_training_info+"num_in_queue.pkl", "wb") as f:
            pickle.dump(agent.num_in_queue, f)
        with open(path_training_info+"total_rewards.pkl", "wb") as f:
            pickle.dump(total_rewards, f)
        if agent.double_dq:
            # Double DQN: save both the online and the target network.
            torch.save(agent.local_net.state_dict(), path_training_info+"dq1.pt")
            torch.save(agent.target_net.state_dict(), path_training_info+"dq2.pt")
        else:
            torch.save(agent.dqn.state_dict(), path_training_info+"dq.pt")
        torch.save(agent.STATE_MEM, path_training_info+"STATE_MEM.pt")
        torch.save(agent.ACTION_MEM, path_training_info+"ACTION_MEM.pt")
        torch.save(agent.REWARD_MEM, path_training_info+"REWARD_MEM.pt")
        torch.save(agent.STATE2_MEM, path_training_info+"STATE2_MEM.pt")
        torch.save(agent.DONE_MEM, path_training_info+"DONE_MEM.pt")
    env.close()
    max_ep_rew = get_max_episode(total_rewards)
    print(" MAX Reward ===>",max_ep_rew["max_reward"],
          "in episode",max_ep_rew["max_episode"])
    # Only plot when enough episodes were run to fill the smoothing window.
    if num_episodes > graph_prop:
        plt.title("Episodes trained vs. Average Rewards")
        plt.plot([0 for _ in range(graph_prop)] +
                 np.convolve(total_rewards, np.ones((graph_prop,))/graph_prop, mode="valid").tolist())
        plt.show()
|
from actual_view import actual_conversation, actual_task, fail
from actual_view import first_utterance, read_user_utterance
from code_view import code
from error_404 import error_404
from index_view import index
from sample_view import sample_conversation, sample_task
from survey_view import save_survey, survey
from utils import check_answers
|
import QuantLib as ql
import math
from vol_models.data_utils import get_ir_ts, get_implied_vols_sticky_strike
class VolModel:
    """Base class for volatility models built on QuantLib market data.

    Subclasses are expected to override get_vol(); the base implementation
    returns None, so get_variance() would fail (TypeError) if called on a
    bare VolModel.
    """
    def __init__(self, vol_data):
        # vol_data is expected to expose market_date, spot, smiles and
        # strikes (see init_defaults) — presumably a project data container.
        self.vol_data = vol_data
        self.init_defaults()
    def get_vol(self, strike, time, recalibrate=True):
        """Return the implied volatility at (strike, time). Abstract here."""
        pass
    def get_variance(self, strike, time):
        """Return total variance sigma^2 * t at (strike, time)."""
        vol = self.get_vol(strike, time)
        return vol*vol*time
    def init_defaults(self):
        """Build the QuantLib market environment from self.vol_data."""
        self.day_count = ql.Actual365Fixed()
        self.calendar = ql.UnitedStates()
        self.calculation_date = ql.Date(self.vol_data.market_date.day, self.vol_data.market_date.month, self.vol_data.market_date.year)
        self.spot = self.vol_data.spot
        # This is a dangerous hook. TODO: make the evaluation date dynamic
        # Danger is avoided now by having same market date across data
        ql.Settings.instance().evaluationDate = self.calculation_date
        # Domestic / foreign interest-rate term structures from the smiles.
        self.dom_ts = get_ir_ts(self.vol_data.smiles, True, self.calculation_date, self.day_count)
        self.for_ts = get_ir_ts(self.vol_data.smiles, False, self.calculation_date, self.day_count)
        self.strikes = self.vol_data.strikes
        self.expiration_dates, implied_vols = \
            get_implied_vols_sticky_strike(self.vol_data.smiles)
        # QuantLib wants a (strike x expiry) matrix; the helper returns it
        # transposed (expiry x strike), hence the [j][i] swap below.
        self.implied_vols = ql.Matrix(len(self.strikes), len(self.expiration_dates))
        for i in range(self.implied_vols.rows()):
            for j in range(self.implied_vols.columns()):
                self.implied_vols[i][j] = implied_vols[j][i]
|
# -*- coding: utf-8 -*-
"""
Cross platform IRC Bot.
Tested on linux and Nokia Symbian S60v3.
Features
- Extendable with reloadable easy to write commands and events
- Provides windows, events and commands with user management and authentication
"""
import os
import sys
import time
import string
import traceback
# Symbian S60 specific compatibility
# Flag consulted later (e.g. when choosing the config-file path).
s60 = False
if sys.platform == "symbian_s60":
    s60 = True
    # The S60 interpreter does not search these locations by default;
    # the bot's modules may live on any of them.
    sys.path.append("e:\\python")
    sys.path.append("c:\\python")
    sys.path.append("c:\\DATA\\python")
import irc
from helpers import *
from const import *
import logger
import mods
import config
import comps
from comps import *
class IRCBot(irc.IRCClient):
    """IRC bot built on top of irc.IRCClient.

    Adds a hot-reloadable command/listener module system, user management
    with authentication, and 'window' abstractions (console, channels,
    queries) over the raw IRC protocol events. Python 2 codebase.
    """
    def __init__(self):
        self.log = logger.Logger(self)
        irc.IRCClient.__init__(self)
        self.commands = {}
        # Zones a command may be used in: private query, channel, or both.
        self.zones = [IRC_ZONE_QUERY, IRC_ZONE_CHANNEL, IRC_ZONE_BOTH]
        self.bot_running = 0
        self.bot_debugging = 1
        self.current_path = get_current_script_path()
        # Config path: optional first CLI argument, else config.txt.
        config_file = "config.txt"
        if len(sys.argv) > 1:
            config_file = sys.argv[1]
        if s60:
            # Symbian phones keep the config at a fixed memory-card path.
            self.config = config.BotConfig(self, "e:\\python\\config.txt")
        else:
            self.config = config.BotConfig(self, config_file)
        self.config.Load()
        # Setup the bot identity and command line user
        self.me = Me(self)
        self.admin = Admin(self)
        # Prepopulate users and windows
        self.users = [self.me, self.admin]
        self.windows = [Console(self, self.me)]
        # commands/listeners hold raw module descriptors;
        # *_cache hold instantiated module objects (lazy for commands).
        self.commands = {}
        self.listeners = {}
        self.commands_cache = {}
        self.listeners_cache = {}
        self.LoadModules()
        self.log.Loaded()
    #
    # Logging
    #
    def BotLog(self, *args):
        """Stringify *args and write one line to the 'bot' log channel."""
        try:
            # Attribute probe: bail out quietly if called before __init__
            # finished setting up the instance.
            x = self.bot_running
        except:
            return False
        args = map(arg_to_str, args)
        line = " ".join(args)
        self.log.Log("bot", line)
    #
    # Manage loading of bot config and modules
    #
    def ReloadBot(self):
        """Reload the comps module; returns True or the raised exception."""
        try:
            reload( comps )
            return True
        except Exception,e:
            msg = get_error_info()
            self.log.Error("bot", msg)
            return e
        # NOTE(review): everything below is unreachable — both branches of
        # the try/except above return before this point.
        self.me = None
        self.channels = []
        self.users = [self.me, self.admin]
        self.ReloadModules()
    def ReloadConfig(self):
        """Reload the config module and re-read the config file."""
        try:
            reload(config)
            # NOTE(review): self.config_file is never assigned anywhere
            # (__init__ uses a local variable), so this likely raises
            # AttributeError; BotConfig is also called without the bot
            # argument used in __init__ — confirm intended signature.
            self.config = config.BotConfig(self.config_file)
            self.config.Load()
        except Exception, e:
            return e
        return True
    def ResetModules(self):
        """Drop all module descriptors and instantiated module objects."""
        self.commands = {}
        self.listeners = {}
        self.commands_cache = {}
        self.listeners_cache = {}
    def LoadModules(self):
        """Register every configured module as command, listener or both.

        Listeners are instantiated (RunListener) immediately; plain
        commands are instantiated lazily on first use (GetCommand).
        """
        use = self.config.GetMods()
        self.commands = {}
        self.listeners = {}
        modules = mods.modules
        for name in modules.keys():
            if not name in use:
                continue
            mod = modules[name]
            mod = self.config.ApplyModConfig(mod)
            if mod["type"] == MOD_COMMAND:
                self.commands[name] = mod
            if mod["type"] == MOD_LISTENER:
                self.listeners[name] = mod
                self.RunListener(name)
            if mod["type"] == MOD_BOTH:
                self.listeners[name] = mod
                self.commands[name] = mod
                self.RunListener(name)
    def ReloadModules(self):
        """Reload mod_base and mods, then re-register everything."""
        try:
            import bot
            reload(__import__("mod_base"))
            reload(mods)
        except Exception,e:
            msg = get_error_info()
            self.log.Error("bot", msg)
            return e
        self.ResetModules()
        self.LoadModules()
        return True
    def EnableModule(self, name):
        """Enable a known module in the config; start it if it listens."""
        if not name in mods.modules.keys():
            return False
        self.config.EnableMod(name)
        if name in self.listeners.keys():
            self.RunListener(name)
    def DisableModule(self, name):
        """Disable a known module and drop its running listener instance."""
        if not name in mods.modules.keys():
            return False
        self.config.DisableMod(name)
        if name in self.listeners_cache.keys():
            del self.listeners_cache[name]
    #
    # Run commands and events
    #
    def HandleMessage(self, win, user, msg):
        """Dispatch an incoming message: fire the MSG event, then commands."""
        self.HandleEvent(Event(IRC_EVT_MSG, win, user, msg))
        commands = self.FindCommands(msg)
        if commands:
            for command in commands:
                self.HandleCommand(win, user, msg, command)
    def HandleCommand(self, win, user, msg, command):
        """Split 'name args' and run the named command."""
        if command:
            data = None
            if command.find(" ") != -1:
                data = command[command.find(" ") + 1:]
                command = command[:command.find(" ")].lower()
            else:
                command = command.lower()
            if data == "": data = None
            self.RunCommand(command, win, user, data)
    def FindCommands(self, message):
        """Extract command strings from a message, or False if none.

        Accepts either the configured cmd_prefix or the bot's own nick
        followed by a separator. Returns a list of command strings, ""
        (to trigger the unknown-command event), or False.
        """
        # Characters after cmd_prefix to ignore
        ignore = [" ", ".", "-", "!", "?", "_"]
        # Check if message begins with cmd_prefix
        if message[:len(self.config["cmd_prefix"])] == self.config["cmd_prefix"]:
            commands = message[len(self.config["cmd_prefix"]):]
            if len(commands) == 0:
                return False
            if commands[0] in ignore:
                return False
            cmds = commands.split(self.config["cmd_separator"])
            cmds = [cmd.strip() for cmd in cmds]
            return cmds
        else: # If not, check if it begins with the bots nick
            parts = message.split(" ")
            if parts[0].lower().startswith(self.me.nick.lower()):
                rest = parts[0].lower()[len(self.me.nick):]
                if not rest:
                    return "" # Return empty string to trigger unknown command event
                if rest[0] in [" ", ",", ";",":"]:
                    cmds = " ".join(parts[1:]).split(self.config["cmd_separator"])
                    cmds = [cmd.strip() for cmd in cmds]
                    return cmds
            return False
    def GetCommand(self, name):
        """Return the command instance for `name` (alias-aware, cached).

        Instantiates and caches the command on first use; returns False
        when no such command exists.
        """
        alias = self.config.AliasToCommand(name)
        if alias:
            name = alias
        if name in self.commands_cache.keys():
            return self.commands_cache[name]
        if name in self.commands.keys():
            command = self.commands[name]
            instance = command["class"](self, command)
            instance.SetProperties(command)
            props = self.config.GetModule(name)
            if props:
                instance.SetProperties(props)
            self.commands_cache[name] = instance
            instance.init()
            return instance
        return False
    def GetModule(self, name):
        """Return any module instance (command first, then listener)."""
        mod = self.GetCommand(name)
        if mod:
            return mod
        # NOTE(review): assumes a registered listener has already been
        # instantiated into listeners_cache — KeyError otherwise.
        if name in self.listeners.keys():
            return self.listeners_cache[name]
        return False
    def GetCommandsByPermission(self, level = 0):
        """List command names usable at the given permission level."""
        cmds = []
        for command in self.commands.keys():
            cmd_level = self.commands[command]["level"]
            if cmd_level > level: continue
            cmds.append(command)
        return cmds
    def RunCommand(self, command, win, user, data):
        """Execute a named command, or raise the unknown-command event."""
        inst = self.GetCommand(command)
        if inst != False:
            args = ""
            if data:
                try:
                    args = unicode(data)
                except:
                    args = data
            data = data or None
            line = str(user)+" "+command+" "+args
            self.log.Log("bot", line, color=logger.colors.RED)
            inst.Execute(win, user, data)
        else:
            # Let listeners claim the unknown command before complaining.
            if not self.HandleEvent(Event(IRC_EVT_UNKNOWN_CMD, win, user, cmd = command, cmd_args=data)):
                win.Privmsg("don't understand")
    def RunListener(self, name):
        """Instantiate a listener module, cache it and call its init()."""
        listener = self.listeners[name]
        instance = listener["class"](self, listener)
        instance.SetProperties(listener)
        props = self.config.GetModule(name)
        if props:
            instance.SetProperties(props)
        if listener["type"] == MOD_BOTH:
            # Dual-role modules also serve as their own command instance.
            self.commands_cache[name] = instance
        self.listeners_cache[name] = instance
        return instance.init()
    def HandleEvent(self, event):
        """Offer an event to every running listener; True if any handled it."""
        handled = False
        for listener in self.listeners_cache.keys():
            # incase module removed during looping
            try:
                lstn = self.listeners_cache[listener]
            except:
                continue
            if IRC_EVT_ANY in lstn.events:
                lstn.ExecuteEvent(event)
            if event.id in lstn.events:
                value = lstn.ExecuteEvent(event)
                if not handled:
                    handled = value or False
        return handled
    #
    # Manage virtual windows
    #
    def MakeWindow(self, name): #When we make a window, we need to set it up with stuff from the config file
        """Create a Channel or Query window for `name` and register it."""
        if self.IsChannelName(name):
            win = Channel(self, name)
            self.windows.append(win)
        else:
            win = Query(self, self.GetUser(name, True))
            self.windows.append(win)
        return win
    def GetWindow(self, name, create=True): #FIXME: Might want to default create to False
        """Find a window by name, optionally creating it on a miss."""
        for win in self.windows:
            if win.GetName() == name:
                return win
        if create:
            win = self.MakeWindow(name)
            return win
        else:
            return False
    def MakeUser(self, nick):
        """Create and register a new User for `nick`."""
        user = User(self, nick)
        self.users.append(user)
        return user
    def GetUser(self, nick, create = False):
        """Find a user by exact nick; optionally create on a miss."""
        for user in self.users:
            if user.nick == nick:
                return user
        if create:
            #self.log.Info("bot", "Creating user "+nick)
            user = self.MakeUser(nick)
            return user
        return False
    def RemoveUser(self, nick):
        """Remove a user by case-insensitive nick; True if one was removed."""
        for user in self.users:
            if user.nick.lower() == nick.lower():
                self.users.pop(self.users.index(user))
                return True
        return False
    def FindUser(self, nick):
        """Find a user by case-insensitive nick, or False."""
        for user in self.users:
            if user.nick.lower() == nick.lower():
                return user
        return False
    # Actions
    def DoAutoJoin(self):
        """Join every channel configured for auto-join."""
        self.JoinChannels(self.config.GetAutoJoinChannels())
    def AuthenticateUser(self, user, name, pw):
        """Authenticate by account name/password; True on success."""
        account = self.config.AuthenticateUser(name, pw)
        if account == False:
            return False
        user.OnAuthed(account)
        return True
    def AuthenticateHostname(self, user, hostname):
        """Authenticate by hostname match; True on success."""
        account = self.config.AuthenticateHostname(hostname)
        if account == False:
            return False
        user.OnAuthed(account)
        return True
    #
    # Events
    #
    def OnLoop(self):
        """Tick interval listeners whose interval has elapsed."""
        for listener in self.listeners_cache.keys():
            lstn = self.listeners_cache[listener]
            if IRC_EVT_INTERVAL in lstn.events:
                if not lstn.last_exec:
                    # Never run before: fire immediately.
                    lstn.ExecuteEvent(Event(IRC_EVT_INTERVAL))
                elif time.time() - lstn.last_exec > lstn.interval:
                    lstn.ExecuteEvent(Event(IRC_EVT_INTERVAL))
    def OnInterrupt(self):
        """Handle Ctrl-C: newline to the console, then raise the event."""
        print ""
        return self.HandleEvent(Event(IRC_EVT_INTERRUPT))
    def OnClientLog(self, line): # Route irc client class logging to BotLog
        self.log.Log("irc", line)
        # Returning True prevents the irc client class from printing the logging to the console
        return True
    def OnConnected(self):
        """Socket connected: introduce ourselves to the server."""
        self.log.Log("bot", "Connected to IRC server.")
        self.DoIntroduce(self.me.nick, self.config["identity"]["ident"], self.config["identity"]["realname"])
    def OnReady(self):
        """IRC handshake finished: reset throttling and auto-join."""
        self.log.Log("bot", "IRC handshake done, now ready.")
        self.throttled = 0
        self.HandleEvent(Event(IRC_EVT_READY))
        self.DoAutoJoin()
    def OnNickInUse(self, nick, reason):
        """Requested nick taken: try an alternative."""
        self.me.TryNewNick()
    def OnUserHostname(self, nick, hostname):
        self.GetUser(nick, True).SetHostname(hostname)
    def OnWhoisHostname(self, nick, hostname):
        self.GetUser(nick, True).SetHostname(hostname)
    def OnUserNickChange(self, nick, new_nick):
        # Make sure nick doesn't exist, just in case.
        test_user = self.GetUser(new_nick)
        if test_user != False:
            # If it does exist, we remove the old user and show a warning.
            self.log.Warning("bot", nick+" changed to existing user: "+new_nick+". Something wrong!")
            self.RemoveUser(new_nick)
        self.GetUser(nick).OnNickChanged(nick, new_nick)
    def OnUserQuit(self, nick, reason):
        """Propagate a quit to the user and every channel that has them."""
        user = self.GetUser(nick)
        user.OnQuit(reason)
        for win in self.windows:
            if win.zone == IRC_ZONE_CHANNEL:
                if win.HasUser(user):
                    win.OnQuit(user, reason)
    def OnPrivmsg(self, by, to, msg):
        """Route a PRIVMSG to the right window, then run command handling."""
        user = self.GetUser(by, True)
        if self.IsChannelName(to):
            win = self.GetWindow(to)
        else:
            # Private message: the window is keyed by the sender's nick.
            win = self.GetWindow(by)
        win.OnPrivmsg(user,msg)
        self.HandleMessage(win, user, msg)
    def OnNotice(self, by, to, msg):
        """Route a NOTICE to the right window (no command handling)."""
        user = self.GetUser(by, True)
        if self.IsChannelName(to):
            win = self.GetWindow(to)
        else:
            win = self.GetWindow(by)
        win.OnNotice(user,msg)
    def OnIJoined(self, chan):
        """We joined a channel: create its window and add ourselves."""
        win = self.GetWindow(chan)
        win.OnIJoined()
        win.AddUser(self.me)
    def OnChannelHasUsers(self, chan, users):
        """ Called when the server indicates which users are present on a channel. """
        self.GetWindow(chan).OnHasUsers(users)
    def OnChannelModesChanged(self, chan, modes, nick):
        user = self.GetUser(nick)
        win = self.GetWindow(chan)
        win.OnModesChanged(modes, user)
    def OnChannelUserModesChanged(self, chan, nickmodes, by):
        # Translate (nick, mode, flag) triplets to (User, mode, flag).
        usermodes = []
        for nickm in nickmodes:
            usermodes.append( (self.GetUser(nickm[0]),nickm[1],nickm[2]) )
        self.GetWindow(chan).OnUserModesChanged(usermodes,by)
    def OnChannelJoin(self, chan, nick):
        win = self.GetWindow(chan)
        win.OnJoin(self.GetUser(nick, True))
    def OnChannelPart(self, chan, nick, reason):
        self.DebugLog("OnChannelPart(", chan, nick, reason, ")")
        self.GetWindow(chan).OnPart(self.GetUser(nick), reason)
    def OnChannelKick(self, chan, who, nick, reason):
        self.GetWindow(chan).OnKick(self.GetUser(who), self.GetUser(nick), reason)
    def OnChannelTopicIs(self, chan, topic):
        self.GetWindow(chan).OnTopicIs(topic)
    def OnChannelTopicMeta(self, chan, nick, utime):
        self.GetWindow(chan).OnTopicMeta(nick, utime)
    def OnChannelTopicChanged(self, chan, by, topic):
        user = self.GetUser(by)
        self.GetWindow(chan).OnTopicChanged(topic, user)
    def RunBot(self):
        """Configure the connection from config and enter the main loop."""
        self.bot_running = 1
        self.log.Log("bot", "Starting bot...")
        self.SetHost(self.config["server"]["host"])
        self.SetPort(self.config["server"]["port"])
        self.SetSendThrottling(self.config["send_throttle"])
        status = self.BotLoop()
        self.log.Log("bot", "Run()","bot loop returned status", status)
        self.log.Log("bot", "Run()","terminated!")
        return status
    def StopBot(self):
        """Signal both the IRC client loop and the bot loop to stop."""
        self.irc_running = 0
        self.bot_running = 0
        return True
    def BotLoop(self):
        """Keep the client connected, reconnecting until StopBot()."""
        self.StartClient()
        while self.bot_running:
            # Each pass here means StartClient() returned, i.e. the
            # connection was lost.
            self.log.Log("bot", "BotLoop()","client disconnected")
            if self.irc_throttled:
                # Server throttled us: wait the configured back-off.
                time.sleep(self.config["throttle_wait"])
            else:
                self.log.Log("bot", "BotLoop()","reconnecting in 10sec")
                time.sleep(10)
            self.StartClient()
        self.log.Log("bot", "BotLoop()","client disconnected")
if __name__ == "__main__":
    # Script entry point: construct the bot and hand control to its loop.
    bot = IRCBot()
    bot.RunBot()
|
from datetime import date
import time, datetime
from .. import db
from ..models import User, Blog, Comment
from werkzeug.security import generate_password_hash, check_password_hash
def initialize():
    """Seed the database with demo users, one blog per user, and comments.

    One-off bootstrap helper; running it twice inserts duplicate rows.
    """
    # NOTE(review): each tuple reads like (first_name, surname, email, flag)
    # but is unpacked as (user, password, email, log), so e.g. "Kip" is
    # hashed as Bethwel's password — confirm this is intended.
    users = [("Bethwel", "Kip", "bethwelkiplimo@gmail.com", False), ("Brian", "Kos", "kos@kos.com", False), ("Dominic", "Tei", "tei@tei.com", False), ("Elvos", "Ron", "ron@ron.com", False)]
    for user, password, email, log in users:
        user = User(username = user, password = generate_password_hash(password), email =email, logged_in = log)
        db.session.add(user)
        db.session.commit()
    now = datetime.datetime.now()
    blogs = [("Life", "No one knows how it goes", now), ("Biking", "Such a great activity", now), ("Moringa", "Such a great place",datetime.datetime.now()),("Blog", "A nice to have thing", datetime.datetime.now())]
    usees = User.query.all()
    i = 0
    # Pair users with blog seeds positionally; assumes the table holds no
    # more users than there are entries in `blogs` (IndexError otherwise).
    for user in usees:
        new_blog = Blog(title = blogs[i][0], blog = blogs[i][1], date = blogs[i][2], user_id = user.user_id)
        print(blogs[i][0])
        db.session.add(new_blog)
        db.session.commit()
        i = i + 1
    # Attach the full comment set to every blog that now exists.
    comments = ["I loved reading your article", "Good work", "Too many typos", "Would love to read more", "I have repeated this comments"]
    blogs = Blog.query.all()
    for blog in blogs:
        for comment in comments:
            new_comment = Comment(comment=comment,blog_id = blog.blog_id)
            db.session.add(new_comment)
            db.session.commit()
|
import json
from pprint import pprint
#Returns data dictionary
def read_json_file(file_name):
    """Read a JSON file and return the parsed data (dict/list).

    Uses a context manager so the handle is closed even when parsing
    raises (the original leaked the handle in that case), and json.load
    to parse straight from the stream instead of read()+loads().
    """
    with open(file_name, "r") as fo:
        return json.load(fo)
# Load the sample input and pretty-print it so the structure can be eyeballed.
input_dict = read_json_file("input.json")
print('\nInput Data\n')
pprint(input_dict)
|
# # Create a function called odd_even that counts from 1 to 2000. As your loop executes have your program print the number of that iteration and specify whether it's an odd or even number
def odd_even():
for count in range(1, 2000):
if count % 2 != 0:
print "Number is " + str(count) + ". This is an odd number."
else:
print "Number is " + str(count) + ". This is an even number."
odd_even()
#
# # Create a function called 'multiply' that iterates through each value in a list (e.g. a = [2, 4, 10, 16]) and returns a list where each value has been multiplied by 5.
# # Multiply
a = [2,4,10,16]
def multiply(a,b):
new_list = []
for num in a:
new_list.append(num * b)
print new_list
multiply(a, 5)
a = [2,4,6]
b=3
def multiple(a, b):
    """Replace each element a[j] in-place with a list of (a[j] * b) ones.

    The original loop used range(0, len(a)-1), which skipped the final
    element and left it untouched; the loop now covers every index. The
    inner append-loop is replaced by the equivalent [1] * n literal.
    """
    for j in range(len(a)):
        ones = a[j] * b
        a[j] = [1] * ones
multiple(a,b)
|
import numpy as np
import pandas as pd
import json
import math
pd.options.mode.chained_assignment = None
import plotly.plotly as py
import plotly.graph_objs as go
# Load only the GTD columns needed downstream, selected by position.
# NOTE(review): the usecols indices must line up with the rename map below —
# verify against the 2016 GTD codebook.
terror_data = pd.read_csv('datasets/globalterrorismdb_0616dist.csv', encoding='ISO-8859-1', usecols=[0, 1, 2, 3, 8, 11, 13, 14, 29, 35, 84, 100, 103])
# Shorten the GTD codebook column names to friendly ones.
terror_data = terror_data.rename(columns={'eventid' :'id',
                                          'iyear' :'year',
                                          'imonth' :'month',
                                          'iday' :'day',
                                          'country_txt' :'country',
                                          'provstate' :'state',
                                          'targtype1_txt' :'target',
                                          'attacktype1_txt' :'attack',
                                          'weaptype1_txt' :'weapon',
                                          'nkill' :'fatalities',
                                          'nwound' :'injuries'})
# Keep only rows where every analysed column is present (non-null) and none
# of the categorical columns carries the catch-all 'Unknown' label.
data = terror_data[(pd.isnull(terror_data.year) == False) &
                   (pd.isnull(terror_data.day) == False) &
                   (pd.isnull(terror_data.country) == False) &
                   (pd.isnull(terror_data.state) == False) &
                   (pd.isnull(terror_data.target) == False) &
                   (pd.isnull(terror_data.attack) == False) &
                   (pd.isnull(terror_data.weapon) == False) &
                   (pd.isnull(terror_data.longitude) == False) &
                   (pd.isnull(terror_data.latitude) == False) &
                   (pd.isnull(terror_data.fatalities) == False) &
                   (pd.isnull(terror_data.injuries) == False) &
                   (terror_data.weapon != 'Unknown') &
                   (terror_data.attack != 'Unknown') &
                   (terror_data.target != 'Unknown') &
                   (terror_data.state != 'Unknown') &
                   (terror_data.country != 'Unknown')
                   ]
# Persist the cleaned subset for the downstream plotting scripts.
filename = 'terror_filtered_db.csv'
data.to_csv(filename, encoding='utf-8', index=False, index_label=False)
print('Saved ' + str(len(data)) + ' instances to file \'' + filename + '\'')
|
"""
Filreader enriching files with synonyms out of wordnet
"""
import sys
from os import listdir, rename, makedirs, remove
from os.path import join, isfile, dirname, exists
import shutil
from pydub import AudioSegment
import subprocess
__author__ = "kaufmann-a@hotmail.ch"
# Working directory for intermediate ffmpeg output before normalisation.
temp_path = "./temp"
# Example: splitting and merging mp4 audio streams with ffmpeg:
#   ffmpeg -i test.mp4 -filter_complex "[0:1][0:2]amerge=inputs=2[ab]" -map [ab] 1.wav
# (here streams 2 and 3 were merged)
def normalize(file, destination, db = -20.0):
    """Load a wav file, shift its gain to the target dBFS level `db`,
    and export the result to `destination`."""
    audio = AudioSegment.from_file(file, "wav")
    # Gain delta that brings the clip's average loudness to the target.
    gain_delta = db - audio.dBFS
    leveled = audio.apply_gain(gain_delta)
    leveled.export(destination, format="wav")
def calculate_ratio_instr_vocs(instr, voc):
    """Derive the target dBFS for the vocal track from the loudness ratio
    between the instrumental and vocal wav files (instrumental is assumed
    to be normalised to -20 dBFS)."""
    instr_level = AudioSegment.from_file(instr, "wav").dBFS
    voc_level = AudioSegment.from_file(voc, "wav").dBFS
    return -20 + (-20 * (voc_level / instr_level - 1))
def copy_files(sourcedir, outputdir, maxCopy, override):
    """Split each stereo file in `sourcedir` into an instrumental (left
    channel) and a vocal (right channel) wav, loudness-normalise both, and
    store them in a per-song folder under `outputdir`.

    maxCopy  -- stop after this many conversions (negative = unlimited,
                since the counter only ever moves away from 0)
    override -- re-create and overwrite already-converted tracks
    """
    src_files= listdir(sourcedir)
    for file in src_files:
        if maxCopy == 0: break
        old_file = join(sourcedir, file)
        new_folder = join(outputdir, file)
        new_songname_instr = 'instrumental_' + file
        new_songname_vocals = 'vocals_' + file
        new_songfile_instr = join(new_folder, new_songname_instr)
        new_songfile_vocals = join(new_folder, new_songname_vocals)
        if not exists(new_folder): makedirs(new_folder)
        if exists(new_songfile_instr) and override: remove(new_songfile_instr)
        if exists(new_songfile_vocals) and override: remove(new_songfile_vocals)
        # Convert only when missing, or unconditionally when overriding.
        if (not exists(new_songfile_vocals) and not exists(new_songfile_instr)) or override:
            # ffmpeg: split the stereo file into two wavs, one per channel
            # (left = instrumental, right = vocals, as in the MIR-1K layout),
            # written into temp_path first.
            cmd = "ffmpeg -i \"" + old_file + "\" -filter_complex \"[0:a]channelsplit=channel_layout=stereo[l][r]\" -map [l] -ac 2 -ar 44100 \"" + join(temp_path, new_songname_instr) + "\" -map [r] -ac 2 -ar 44100 \"" + join(temp_path, new_songname_vocals) + "\""
            subprocess.check_call(cmd, shell=True) # cwd = cwd
            # Keep the original vocal/instrumental loudness ratio while
            # normalising the instrumental to -20 dBFS.
            vocal_volume = calculate_ratio_instr_vocs(join(temp_path, new_songname_instr), join(temp_path, new_songname_vocals))
            normalize(join(temp_path, new_songname_instr), new_songfile_instr, -20)
            normalize(join(temp_path, new_songname_vocals), new_songfile_vocals, vocal_volume)
            print("\n" + new_songname_vocals + " and " + new_songname_instr + " converted" + "\n")
            # Clean up the un-normalised intermediates.
            remove(join(temp_path, new_songname_vocals))
            remove(join(temp_path, new_songname_instr))
            maxCopy -= 1
if __name__ == '__main__':
    # NOTE(review): earlier revisions apparently accepted "maxfiles override"
    # on the command line; the current code only takes an optional
    # unmix-server path as argv[1], while maxCopy/override are hard-coded
    # below. The old usage comment was misleading and has been corrected.
    maxCopy = -1        # -1 = unlimited (copy_files only stops at exactly 0)
    override = True     # overwrite previously converted tracks
    unmix_server = '//192.168.1.29/unmix-server'
    print('Argument List:', str(sys.argv))
    if sys.argv.__len__() == 2:
        unmix_server = sys.argv[1]
    sources = unmix_server + "/1_sources/MIR-1K/UndividedWavfile"
    destination = unmix_server + "/2_prepared/MIR-1K"
    if not exists(temp_path): makedirs(temp_path)
    copy_files(sources, destination, maxCopy, override)
    print('Finished converting')
print('Finished converting')
|
#!/usr/bin/python
#from restkit import Resource, BasicAuth, request
import requests
import webservice.restclient
from requests.auth import HTTPBasicAuth
from issue import JiraRequest, JiraComment
import logging
import json
import re
import os
from utility import add_http
from urlparse import urljoin
from datetime import date, datetime, time, timedelta
logger = logging.getLogger("Notification")
class Jira_API:
    """URL builders for the Jira REST API (version 2)."""
    API_URL = "/rest/api/2"
    # BUGFIX: os.path.join(API_URL, "/issue") discarded API_URL entirely
    # because the second component is absolute, yielding just "/issue".
    # Plain concatenation produces the intended path.
    CREATE_TICKET = API_URL + "/issue"
    @staticmethod
    def get_create_project_meta_url(project_key = None):
        """Return the createmeta URL, optionally filtered to one project key."""
        if project_key:
            return Jira_API.API_URL + "/issue/createmeta?projectKeys=%s" % project_key
        return Jira_API.API_URL + "/issue/createmeta"
    @staticmethod
    def get_create_comment_url(issue_key):
        """Return the comments collection URL for the given issue."""
        return Jira_API.API_URL + "/issue/%s/comment" % issue_key
    @staticmethod
    def get_create_project_url():
        """Return the issue-creation endpoint URL."""
        return Jira_API.API_URL + "/issue/"
class Jira(webservice.restclient.TrRestClient):
    """Jira REST client that creates tickets and posts per-event comments
    for notification 'cases'. Python 2 codebase (print statements,
    iteritems); authentication is HTTP Basic.
    """
    def __init__(self, server_url, username, password):
        logger.info("Creating %s instance. url : %s, user : %s" % (self.__class__.__name__,
                                                                   server_url, username))
        webservice.restclient.TrRestClient.__init__(self, server_url, username,
                                                    password)
        # Filled in later via set_mandatory_fields() / save().
        self.account_id = ""
        self.ticket_number = ""
    def set_mandatory_fields(self, extra_info):
        """Copy the required ticket fields from an external info object."""
        self.account_id = extra_info.account_id
        self.issuetype = extra_info.issuetype
        self.priority = extra_info.priority
    def get_auth_token(self):
        """Build the HTTP Basic auth object from stored credentials."""
        return HTTPBasicAuth(self.username, self.password)
    def get_url(self, uri_stem):
        """Join the server base URL (http-prefixed if needed) with a URI stem."""
        print self.url
        print add_http(self.url)
        print urljoin(add_http(self.url), uri_stem)
        return urljoin(add_http(self.url), uri_stem)
    def get(self, uri_stem, response_code = 200):
        """HTTP GET; returns parsed JSON or raises on unexpected status."""
        auth_token = self.get_auth_token()
        url = self.get_url(uri_stem)
        response = requests.get(url=url, auth=auth_token)
        logger.info("Method:GET, Response Code: %s, URL: %s" %
                    (response.status_code, url) )
        if response.status_code != response_code:
            logger.error("Failed to get data. Error Code: %s" % response.status_code)
            raise Exception("Exception caught: Method:GET, Response Code: %s, URL: %s" %
                            (response.status_code, url))
        return response.json()
    def post(self, uri_stem, data, response_code = 200):
        """HTTP POST of a JSON body; returns parsed JSON or raises."""
        auth_token = self.get_auth_token()
        url = self.get_url(uri_stem)
        headers = {'Content-type': 'application/json'}
        response = requests.post(url=url, auth=auth_token, data=data, headers=headers)
        logger.info("Method:POST, Response Code: %s, URL: %s" %
                    (response.status_code, url) )
        if response.status_code != response_code:
            logger.error("Failed to post data. Error Code: %s" % response.status_code)
            raise Exception("Exception caught: Method:POST, Response Code: %s, URL: %s" %
                            (response.status_code, url))
        return response.json()
    def get_existing_ticket_note_list(self, ticket_id):
        """Collect the event IDs already recorded as comments on a ticket.

        Comments are expected to embed 'eventID = <id>'; IDs are returned
        upper-cased with surrounding quotes stripped.
        """
        ticket_note_list = []
        uri_comments = Jira_API.get_create_comment_url(issue_key=ticket_id)
        response = self.get(uri_comments)
        for comment in response.get("comments"):
            if "body" not in comment.keys():
                continue
            matchobj = re.findall("eventID\s*=\s*(\S+)", comment['body'], re.MULTILINE)
            if matchobj:
                ticket_note_list.append(matchobj[0].strip('"').upper())
        return ticket_note_list
    def add_ticket_notes(self, ticket_number, case):
        """Post one comment per case event that isn't already on the ticket."""
        logger.info("Jira: Adding / Updating Ticket Notes in ticket %s for case : %s(%s)"
                    % (ticket_number, case.name, case.case_id))
        #get list of Existing tickets
        ticket_note_list = self.get_existing_ticket_note_list(ticket_number)
        logger.info("There are currently '%d' Ticket Notes in ticket number: %s"
                    % (len(ticket_note_list), ticket_number))
        for event_id, event in case.events.iteritems():
            if str(event_id).upper() not in ticket_note_list:
                logger.info("Event Id: %s is new in ticket %s" %
                            ((str(event_id).upper()), ticket_number))
                jira_comment = JiraComment(str(event))
                create_comment_uri = Jira_API.get_create_comment_url(issue_key=ticket_number)
                # Serialise the comment object's attributes straight to JSON.
                fields = json.dumps(jira_comment, default=lambda o: o.__dict__)
                logger.info(fields)
                create_response = self.post(create_comment_uri, fields,response_code=201)
                logger.info("Jira comment added for event %s" % str(event_id).upper())
    def add_ticket(self, case):
        """Create a new Jira issue for a case; returns the new issue key."""
        logger.info("Adding Jira ticket for case: %s" % case.case_id)
        create_uri = Jira_API.get_create_project_url()
        jira_request = JiraRequest(self.account_id)
        jira_request.prepare(title = case.get_ticket_title(max_length=250),
                             description = case.get_ticket_description(),
                             duedate = case.get_due_date().strftime("%Y-%m-%d %H:%M:%S"),
                             issuetype = "Task")
        jira_request.print_ticket()
        fields = json.dumps(jira_request, default=lambda o: o.__dict__)
        logger.info(fields)
        create_response = self.post(create_uri, fields,response_code=201)
        return create_response['key']
    def save(self, case):
        """Create a ticket for the case (or reuse its external ticket) and
        attach the case's event notes as comments."""
        if case.external_ticket and len(case.external_ticket) > 0:
            logger.info("Jira: Updating Existing Ticket: %s for case : %s(%s)"
                        % (case.external_ticket, case.name, case.case_id))
            self.add_ticket_notes(case.external_ticket, case)
            self.ticket_number = case.external_ticket
        else:
            logger.info("Jira: Creating New Ticket for case : %s(%s)"
                        % (case.name, case.case_id))
            self.ticket_number = self.add_ticket(case)
            self.add_ticket_notes(self.ticket_number, case)
    def get_due_date(self, delta = 72):
        """Return now + `delta` hours (negative deltas clamp to now)."""
        delta = delta if delta > 0 else 0
        return datetime.combine(date.today(), datetime.now().time()) + timedelta(hours=delta)
    def load_fields(self):
        """Fetch the createmeta and return per-project issue-type info."""
        projects = []
        uri_meta = Jira_API.get_create_project_meta_url()
        response = self.get(uri_meta)
        for project in response.get('projects'):
            project_name = project.get("name")
            project_id = project.get("key")
            issue_types = []
            for issuetype in project.get('issuetypes'):
                issue_types.append(issuetype.get("name"))
            projects.append({"name" : project_name, "id" : project_id, "issuetype" : issue_types})
        return projects
    def test(self):
        """Manual smoke test: create a dummy ticket and print its key."""
        create_uri = Jira_API.get_create_project_url()
        jira_request = JiraRequest(self.account_id)
        jira_request.prepare(title = "My Message",
                             description = "My Desc",
                             duedate = self.get_due_date().strftime("%Y-%m-%d %H:%M:%S"))
        jira_request.print_ticket()
        fields = json.dumps(jira_request, default=lambda o: o.__dict__)
        logger.info(fields)
        create_response = self.post(create_uri, fields,response_code=201)
        print create_response['key']
# resource = Resource(url + ('/rest/api/latest/issue/%s' % key),
# pool_instance=None, filters=[self.get_auth_tiken()])
# auth_token = self.get_auth_token()
# print auth_token
# resp = requests.get('http://192.168.2.222:8080' + ('/rest/api/2/issue/%s/comment' % key),
# auth=auth_token)
# print resp.status_code
# print resp.json()
# # if resp.status_code != 200:
# # This means something went wrong.
# raise ApiError('GET /tasks/ {}'.format(resp.status_code))
# for todo_item in resp.json():
# print('{} {}'.format(todo_item['id'], todo_item['summary']))
|
import sys
sys.path.append("../")
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import argparse
import os
from models import *
from utils import load_pretrained_net, \
fetch_nearest_poison_bases, fetch_poison_bases, fetch_all_target_cls
from trainer import make_convex_polytope_poisons, train_network_with_poison
from PIL import Image, ExifTags
import cv2
import os
import torch
from sklearn.manifold import TSNE
class Logger(object):
    """Tee-style writer: everything written is mirrored to the real stdout
    and appended to a log file, so the object can replace sys.stdout."""
    def __init__(self, path):
        self.terminal = sys.stdout
        self.log = open(path, "a+")  # append mode; create the file if missing
    def write(self, message):
        # Fan the message out to both sinks.
        for sink in (self.terminal, self.log):
            sink.write(message)
    def flush(self):
        # No-op: present only so the object satisfies the file-like protocol
        # expected by Python 3 callers of sys.stdout.flush().
        pass
def fetch_all_external_targets(target_label, root_path, subset, start_idx, end_idx, num, transforms, device='cuda'):
    """Load external target images named `<root_path>_<subset:02d>_<idx:03d>.jpg`.

    When start_idx == end_idx == -1, consecutive indices starting at 1 are
    scanned until a file is missing; otherwise the explicit inclusive range
    [start_idx, end_idx] is loaded (every file must exist).  Every i-th item
    (i = ceil(len/num)) goes into the primary split, the rest into the eval
    split.

    Returns (targets, target_indices, eval_targets, eval_indices) where each
    target entry is [image_tensor_with_batch_dim_on_device, label_tensor].
    """
    target_list = []
    indices = []
    if start_idx == -1 and end_idx == -1:
        print("No specific indices are determined, so try to include whatever we find")
        idx = 1
        while True:
            path = '{}_{}_{}.jpg'.format(root_path, '%.2d' % subset, '%.3d' % idx)
            if not os.path.exists(path):
                print("In total, we found {} images of target {}".format(len(indices), subset))
                break
            img = Image.open(path)
            # Add the batch dimension and move the label to the device, matching
            # the explicit-range branch below (the two branches previously
            # produced differently-shaped entries).
            target_list.append([transforms(img)[None, :, :, :].to(device),
                                torch.tensor([target_label]).to(device)])
            indices.append(idx)
            idx += 1
    else:
        assert start_idx != -1
        assert end_idx != -1
        for target_index in range(start_idx, end_idx + 1):
            indices.append(target_index)
            path = '{}_{}_{}.jpg'.format(root_path, '%.2d' % subset, '%.3d' % target_index)
            assert os.path.exists(path), "external target couldn't find"
            img = Image.open(path)
            target_list.append([transforms(img)[None, :, :, :].to(device),
                                torch.tensor([target_label]).to(device)])
    # Ceiling division without math.ceil — `math` was never imported in this
    # file, so the original line raised NameError at runtime.
    i = -(-len(target_list) // num)
    return [t for j, t in enumerate(target_list) if j % i == 0], [t for j, t in enumerate(indices) if j % i == 0], \
           [t for j, t in enumerate(target_list) if j % i != 0], [t for j, t in enumerate(indices) if j % i != 0]
if __name__ == '__main__':
    # ======== arg parser =================================================
    parser = argparse.ArgumentParser(description='PyTorch Poison Attack')
    parser.add_argument('--gpu', default='0', type=str)
    # The substitute models and the victim models
    # NOTE(review): `type=bool` on a CLI flag treats ANY non-empty string as
    # True ('--end2end False' is still truthy) — confirm intended usage.
    parser.add_argument('--end2end', default=False, choices=[True, False], type=bool,
                        help="Whether to consider an end-to-end victim")
    parser.add_argument('--substitute-nets', default=['ResNet50', 'ResNet18'], nargs="+", required=False)
    parser.add_argument('--victim-net', default=["DenseNet121"], nargs="+", type=str)
    parser.add_argument('--model-resume-path', default='../models-chks-release', type=str,
                        help="Path to the pre-trained models")
    parser.add_argument('--net-repeat', default=1, type=int)
    parser.add_argument("--subs-chk-name", default=['ckpt-%s-4800.t7'], nargs="+", type=str)
    parser.add_argument("--test-chk-name", default='ckpt-%s-4800.t7', type=str)
    parser.add_argument('--subs-dp', default=[0], nargs="+", type=float,
                        help='Dropout for the substitute nets, will be turned on for both training and testing')
    # Parameters for poisons
    parser.add_argument('--target-path', default='../datasets/epfl-gims08/resized/tripod_seq', type=str,
                        help='path to the external images')
    parser.add_argument('--target-index', default=6, type=int,
                        help='model of the car in epfl-gims08 dataset')
    parser.add_argument('--target-start', default='-1', type=int,
                        help='first index of the car in epfl-gims08 dataset')
    parser.add_argument('--target-end', default='-1', type=int,
                        help='last index of the car in epfl-gims08 dataset')
    parser.add_argument('--target-num', default='5', type=int,
                        help='number of targets')
    parser.add_argument('--target-label', default=1, type=int)
    parser.add_argument('--poison-label', '-plabel', default=6, type=int,
                        help='label of the poisons, or the target label we want to classify into')
    parser.add_argument('--poison-num', default=5, type=int,
                        help='number of poisons')
    parser.add_argument('--poison-lr', '-plr', default=4e-2, type=float,
                        help='learning rate for making poison')
    parser.add_argument('--poison-momentum', '-pm', default=0.9, type=float,
                        help='momentum for making poison')
    parser.add_argument('--poison-ites', default=1000, type=int,
                        help='iterations for making poison')
    parser.add_argument('--poison-decay-ites', type=int, metavar='int', nargs="+", default=[])
    parser.add_argument('--poison-decay-ratio', default=0.1, type=float)
    parser.add_argument('--poison-epsilon', '-peps', default=0.1, type=float,
                        help='maximum deviation for each pixel')
    parser.add_argument('--poison-opt', default='adam', type=str)
    parser.add_argument('--nearest', default=False, action='store_true',
                        help="Whether to use the nearest images for crafting the poison")
    parser.add_argument('--subset-group', default=0, type=int)
    parser.add_argument('--original-grad', default=True, choices=[True, False], type=bool)
    parser.add_argument('--tol', default=1e-6, type=float)
    # Parameters for re-training
    parser.add_argument('--retrain-lr', '-rlr', default=0.1, type=float,
                        help='learning rate for retraining the model on poisoned dataset')
    parser.add_argument('--retrain-opt', default='adam', type=str,
                        help='optimizer for retraining the attacked model')
    parser.add_argument('--retrain-momentum', '-rm', default=0.9, type=float,
                        help='momentum for retraining the attacked model')
    parser.add_argument('--lr-decay-epoch', default=[30, 45], nargs="+",
                        help='lr decay epoch for re-training')
    parser.add_argument('--retrain-epochs', default=60, type=int)
    parser.add_argument('--retrain-bsize', default=64, type=int)
    parser.add_argument('--retrain-wd', default=0, type=float)
    parser.add_argument('--num-per-class', default=50, type=int,
                        help='num of samples per class for re-training, or the poison dataset')
    # Checkpoints and resuming
    parser.add_argument('--chk-path', default='chk-black', type=str)
    parser.add_argument('--chk-subdir', default='poisons', type=str)
    parser.add_argument('--eval-poison-path', default='', type=str,
                        help="Path to the poison checkpoint you want to test")
    parser.add_argument('--resume-poison-ite', default=0, type=int,
                        help="Will automatically match the poison checkpoint corresponding to this iteration "
                             "and resume training")
    parser.add_argument('--train-data-path', default='../datasets/CIFAR10_TRAIN_Split.pth', type=str,
                        help='path to the official datasets')
    parser.add_argument('--dset-path', default='datasets', type=str,
                        help='path to the official datasets')
    parser.add_argument('--mode', default='convex', type=str,
                        help='if convex, run the convexpolytope attack proposed by the paper, '
                             'otherwise run the mean method')
    parser.add_argument('--device', default='cuda', type=str)
    args = parser.parse_args()
    # Set visible CUDA devices
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    cudnn.benchmark = True
    # load the pre-trained models
    sub_net_list = []
    for n_chk, chk_name in enumerate(args.subs_chk_name):
        for snet in args.substitute_nets:
            # NOTE(review): args.subs_dp is indexed by checkpoint index, so it
            # must have at least len(subs_chk_name) entries.
            if args.subs_dp[n_chk] > 0.0:
                net = load_pretrained_net(snet, chk_name, model_chk_path=args.model_resume_path,
                                          test_dp=args.subs_dp[n_chk])
            elif args.subs_dp[n_chk] == 0.0:
                net = load_pretrained_net(snet, chk_name, model_chk_path=args.model_resume_path)
            else:
                assert False
            sub_net_list.append(net)
    print("subs nets, effective num: {}".format(len(sub_net_list)))
    print("Loading the victims networks")
    targets_net = []
    for vnet in args.victim_net:
        victim_net = load_pretrained_net(vnet, args.test_chk_name, model_chk_path=args.model_resume_path)
        targets_net.append(victim_net)
    # Standard CIFAR-10 normalization statistics.
    cifar_mean = (0.4914, 0.4822, 0.4465)
    cifar_std = (0.2023, 0.1994, 0.2010)
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(cifar_mean, cifar_std),
    ])
    # Get the target images
    _targets, targets_indices, eval_targets, val_targets_indices = fetch_all_external_targets(args.target_label,
                                                                                              args.target_path,
                                                                                              args.target_index,
                                                                                              args.target_start,
                                                                                              args.target_end,
                                                                                              args.target_num,
                                                                                              transforms=transform_test)
    car_imgs, idxes = fetch_all_target_cls(args.target_label, 100, 'others', args.train_data_path, transform_test)
    frog_imgs, idxes = fetch_all_target_cls(args.poison_label, 100, 'others', args.train_data_path, transform_test)
    targets = [x for x, _ in _targets]
    print("targets = ", targets[0].shape, len(targets), car_imgs[0].shape, len(car_imgs), frog_imgs[0].shape, len(frog_imgs))
    #targets = torch.Tensor(targets).to(args.device)
    #targets.resize_((19,3,32,32))
    targets = torch.stack(targets)
    car_imgs = torch.Tensor(car_imgs).to(args.device)
    frog_imgs = torch.Tensor(frog_imgs).to(args.device)
    # Extract penultimate-layer features with the FIRST substitute net only
    # (the loop breaks after one iteration).
    with torch.no_grad():
        for n_net, net in enumerate(sub_net_list):
            target_img_feat = net.module.penultimate(targets)
            car_imgs_feat = net.module.penultimate(car_imgs)
            frog_imgs_feat = net.module.penultimate(frog_imgs)
            break
    print(target_img_feat.shape)
    print(car_imgs_feat.shape, frog_imgs_feat.shape)
    # Stack features as [targets; frogs; cars] and save to disk (CPU tensor)
    # for offline visualization (the t-SNE code is commented out).
    px = torch.cat((frog_imgs_feat, car_imgs_feat), dim=0)
    px = torch.cat((target_img_feat, px), dim=0)
    print(px.shape)
    px = px.to('cpu')
    #tsne_em = TSNE(n_components=2, perplexity=30.0, n_iter=1000, verbose=1).fit_transform(px)
    #from bioinfokit.visuz import cluster
    #cluster.tsneplot(score=tsne_em)
    torch.save(px, 'feat_arr.pt')
|
#!/usr/bin/env python3
from ..lib import keyword_utils
'''
"arg.pre": {
"arg_num": n,
"feature": [...],
// "has_relation": true,
// "relations": [[0, 1], ...]
},
"feature" - e.g.
"feature": [
{
"check": {
// "check_cond": "",
"checked": false,
// "compared_with_const": 0,
// "compared_with_non_const": false
},
"is_alloca": false,
"is_global": false,
"is_constant": false,
// "arg_value": -1,
}
],
'''
def only_code_check(func_name, specification, complete_feature, doc_feature=None):
    """Run the code-only specification check for `func_name`.

    Returns (handled, alarm_text): handled is False when the specification is
    empty or its argument count disagrees with the extracted feature;
    alarm_text accumulates per-argument violation messages.
    """
    # Avoid a mutable default argument; None stands in for "no doc feature".
    if doc_feature is None:
        doc_feature = {}
    if specification == {}:
        return False, ""
    feature = complete_feature['arg.pre']
    alarm_text = ""
    arg_num = feature['arg_num']
    args_need_to_check = specification['args_need_to_check']
    if arg_num != len(args_need_to_check):
        # Spec and feature disagree on arity — treat as an internal error.
        return False, ""
    # Check the feature of each argument.
    if not keyword_utils.is_post(func_name):
        for num in range(arg_num):
            alarm_text += check_arg_feature(feature['feature'][num], args_need_to_check[num][0], num, doc_feature)
    else:
        # "post"-style functions: flag stack allocations passed as arguments.
        for num in range(arg_num):
            if complete_feature['arg.pre']['feature'][num]['is_alloca']:
                alarm_text += f"Potential: arg {num} is on stack and dealloced. "
    return True, alarm_text
def check_arg_feature(feature, args_need_to_check, arg_index, doc_feature=None):
    """Check one argument's pre-condition feature.

    Returns an alarm string when the argument should have been checked
    (per the spec or the doc feature) but was not and is not a global;
    otherwise an empty string.
    """
    # Avoid a mutable default argument; None stands in for "no doc feature".
    if doc_feature is None:
        doc_feature = {}
    checked = feature['check']['checked']
    doc_need_to_check = False if doc_feature == {} else doc_feature[arg_index]
    if args_need_to_check or doc_need_to_check:
        # Globals are exempt from the most-frequent-check rule.
        if not checked and not feature['is_global']:  # and not feature['is_constant']:
            return f"violate the most-frequent check for arg.{arg_index}.pre. "
    return ""
|
'''
Demonstrating Euler's method
Name: Kevin Trinh
Goal: Approximate the solution to a differential equation.
'''
import matplotlib.pyplot as plt
def diffeq(x, y):
    '''Right-hand side of the ODE being approximated: dy/dx = 0.1*x*y + 0.5*y.'''
    return 0.1 * x * y + 0.5 * y
def eulersMethod(func, x_0, y_0, h, end):
    '''Use Euler's method on dy/dx = func(x, y), starting from (x_0, y_0)
    with step size h.  Stop approximating once x passes the given x-value,
    end.

    Returns (x_array, y_array): the points AFTER each step.  The initial
    point (x_0, y_0) is intentionally excluded, matching the plotting code.
    '''
    # for plotting purposes
    x_array = []
    y_array = []
    # prepare for loop (the unused precomputed step count was removed)
    x = x_0
    y = y_0
    # main loop: y_{n+1} = y_n + f(x_n, y_n) * h
    while x <= end:
        y += func(x, y) * h
        x += h
        y_array.append(y)
        x_array.append(x)
    return x_array, y_array
def plot():
    '''Plot results of Euler's method with 5 different step sizes.'''
    # obtain data
    x1_array, y1_array = eulersMethod(diffeq, 0, 1, 1.0, 6)
    x2_array, y2_array = eulersMethod(diffeq, 0, 1, 0.5, 6)
    x3_array, y3_array = eulersMethod(diffeq, 0, 1, 0.25, 6)
    x4_array, y4_array = eulersMethod(diffeq, 0, 1, 0.01, 6)
    x5_array, y5_array = eulersMethod(diffeq, 0, 1, 0.001, 6)
    # plot data — each label now matches the step size actually used above
    plt.plot(x1_array, y1_array, 'r-', label='h = 1.0')
    plt.plot(x2_array, y2_array, 'b-', label='h = 0.5')
    plt.plot(x3_array, y3_array, 'g-', label='h = 0.25')  # was mislabelled 'h = 0.1'
    plt.plot(x4_array, y4_array, 'y-', label='h = 0.01')
    plt.plot(x5_array, y5_array, 'm-', label='h = 0.001')
    # set graphing window, labels, and legend
    ymax = max(y3_array)
    plt.axis([1, 6, 0, ymax])
    plt.xlabel('x')
    plt.ylabel('y')
    plt.legend()
    plt.show()


plot()
|
import os
import pathlib
#import tensorflow as tf
import numpy as np
import keras
import random
#import investpy as iv
import yfinance as yf
# Technical-indicator window parameters (units: trading periods).
SAMPLE_P = 50   # length of each feature sample window
RSI_P = 14      # RSI look-back period
EMA12_P = 12    # fast EMA period for MACD
EMA26_P = 26    # slow EMA period for MACD
SMA_P = 15      # simple moving average period (not referenced in this chunk)
# Gets Moving average convergence divergence for list of prices
# Allows for training parameter, which also generates the correct predictions
def get_macd(prices, train=True):
    """Build overlapping MACD (EMA12 - EMA26) feature windows from `prices`.

    Each emitted window holds SAMPLE_P - EMA26_P MACD values; after emitting
    one, the scan index rewinds 45 samples so successive windows overlap.
    With train=True, the 30-step-ahead percentage price change is collected
    per window as the target, and the last 50 samples are held back so the
    lookahead stays in range.

    Returns `macd` (list of windows) and, when train=True, also
    `price_correct` (the targets).

    NOTE(review): the EMA smoothing factor here is 2/(P-1); the textbook EMA
    uses 2/(P+1) — confirm this deviation is intentional.
    """
    SMA12 = 0
    EMA12_prev = 0
    EMA12 = 0
    SMA26 = 0
    EMA26_prev = 0
    EMA26 = 0
    MACD = []
    n = 0
    macd = []
    m = 0
    i = 0
    if train:
        a = 50
    else:
        a = 0
    price_correct = []
    while i < len(prices) - a:
        p = prices[i]
        if len(MACD) == SAMPLE_P - EMA26_P:
            # Window complete: emit it (and the training target), reset all
            # running state, and rewind to create the next overlapping window.
            macd.append(MACD)
            if train:
                y = 100*(prices[i+30]-prices[i])/prices[i]
                price_correct.append(y)
            SMA12 = 0
            EMA12_prev = 0
            EMA12 = 0
            SMA26 = 0
            EMA26_prev = 0
            EMA26 = 0
            MACD = []
            m += 1
            n = 0
            i = i - 45
        else:
            # Fast EMA: seed with an SMA over the first EMA12_P prices.
            if n < EMA12_P:
                SMA12 += p
            elif n == EMA12_P:
                SMA12 = SMA12 / EMA12_P
                EMA12 = (p * (2 / (EMA12_P - 1))) + \
                    (SMA12 * (1 - (2 / (EMA12_P - 1))))
                EMA12_prev = EMA12
            else:
                EMA12 = (p * (2 / (EMA12_P - 1))) + \
                    (EMA12_prev * (1 - (2 / (EMA12_P - 1))))
                EMA12_prev = EMA12
            # Slow EMA: same scheme over EMA26_P prices.
            if n < EMA26_P:
                SMA26 += p
            elif n == EMA26_P:
                SMA26 = SMA26 / EMA26_P
                EMA26 = (p * (2 / (EMA26_P - 1))) + \
                    (SMA26 * (1 - (2 / (EMA26_P - 1))))
                EMA26_prev = EMA26
            else:
                EMA26 = (p * (2 / (EMA26_P - 1))) + \
                    (EMA26_prev * (1 - (2 / (EMA26_P - 1))))
                EMA26_prev = EMA26
            # MACD is defined once both EMAs are warmed up.
            if n >= EMA26_P:
                MACD.append(EMA12 - EMA26)
            n += 1
        i += 1
    if train:
        return macd, price_correct
    else:
        return macd
# Calculates list of Relative Strength index for given percentages
def get_rsi(percents, train=True):
    """Build overlapping RSI feature windows from per-period percentage changes.

    Mirrors get_macd's windowing: each window holds SAMPLE_P - RSI_P values;
    after emitting one, the index rewinds 45 samples.  train=True only holds
    back the final 50 samples (no targets are produced here).

    NOTE(review): the smoothing constants 13 and 14 below are RSI_P - 1 and
    RSI_P hard-coded — they will silently disagree with RSI_P if it changes.
    """
    rsi_gain = 0
    rsi_loss = 0
    rsi_init_gains = 0
    rsi_init_losses = 0
    rsi_prev_gain = 0
    rsi_prev_loss = 0
    RSI = []
    i = 0
    rsi = []
    n = 0
    m = 0
    a = 0
    if train:
        a = 50
    while i < len(percents) - a:
        delta = percents[i]
        if len(RSI) == SAMPLE_P - RSI_P:
            # Window complete: emit and reset, then rewind for overlap.
            rsi.append(RSI)
            rsi_gain = 0
            rsi_loss = 0
            rsi_init_gains = 0
            rsi_init_losses = 0
            rsi_prev_gain = 0
            rsi_prev_loss = 0
            RSI = []
            n = 0
            m += 1
            i = i - 45
        else:
            if n < RSI_P:
                # Warm-up: accumulate raw gains and losses separately.
                if delta < 0:
                    rsi_init_losses += delta
                else:
                    rsi_init_gains += delta
            elif n == RSI_P:
                # First RSI value from the warm-up averages.
                if rsi_init_losses == 0:
                    RSI.append(0)
                    rsi_prev_gain = 0
                    rsi_prev_loss = 0
                else:
                    rsi_gain = rsi_init_gains / RSI_P
                    rsi_loss = -1 * rsi_init_losses / RSI_P
                    calc = 100 - (100 / (1 + (rsi_gain/rsi_loss)))
                    RSI.append(calc)
                    rsi_prev_gain = rsi_gain
                    rsi_prev_loss = rsi_loss
            else:
                # Wilder-style smoothed update of average gain/loss.
                gain = 0
                loss = 0
                if delta > 0:
                    gain = delta
                else:
                    loss = -1 * delta
                rsi_gain = (rsi_prev_gain * 13 + gain) / 14
                rsi_loss = (rsi_prev_loss * 13 + loss) / 14
                if rsi_loss == 0:
                    RSI.append(0)
                else:
                    calc = 100 - (100 / (1 + (rsi_gain/rsi_loss)))
                    RSI.append(calc)
                rsi_prev_gain = rsi_gain
                rsi_prev_loss = rsi_loss
            n += 1
            i += 1
    return rsi
def get_prediction(ticker):
    """Predict a price range for `ticker` from RSI and MACD features.

    Downloads the full price history via yfinance, builds RSI/MACD feature
    windows, runs the saved Keras model, and scales its fractional
    percentage predictions by the latest close.

    Returns (weighted_value, max_value, min_value) in price units.
    """
    price_map = yf.Ticker(ticker).history(period="max")
    # Skip the first row so the series align with percentage-change features.
    # (Volume/High/Low were previously fetched but never used — removed.)
    opens = price_map['Open'][1:].array
    closes = price_map['Close'][1:].array
    percents = [100 * (c - o) / o for o, c in zip(opens, closes)]
    rsi = np.array(get_rsi(percents, train=False))
    macd = np.array(get_macd(closes, train=False))
    p = pathlib.Path(__file__).parent.absolute() / 'test_model_tf'  # 'spy_macd_rsi_v0'
    model = keras.models.load_model(p)
    pred_range = model.predict([rsi, macd]).squeeze()
    pred_range.sort()
    max_percentage = max(pred_range)
    min_percentage = min(pred_range)
    weighted_average_percentage = np.average(pred_range)
    #print('MAX: ', max_percentage, ' MIN: ', min_percentage, ' AVG: ', weighted_average_percentage)
    # Convert fractional predictions into absolute prices off the last close.
    last_close = closes[-1]
    max_value = last_close * (max_percentage + 1)
    min_value = last_close * (1 + min_percentage)
    weighted_value = last_close * (weighted_average_percentage + 1)
    return (weighted_value, max_value, min_value)
|
# global variable
# NOTE(review): this is Python 2 source — the `0L` long-integer literals
# below are invalid syntax on Python 3.  The code that reads/writes these
# globals is not visible in this chunk, so their exact semantics are
# inferred from their names only.
max_time = 0
min_time = 0
average_time = 0L
list_sed = []
list_rev = []
list_OK = []
file_flag = 0
file_size_tmp = 0
file_size = 0
file_input = ''
file_output = ''
Arg_A = ''
Arg_B = ''
Arg_mA = ''
Arg_mB = ''
Arg_mC = ''
g_time = 0L
g_number = -1
import uuid
from functools import wraps
# from peewee import ExecutionContext, Using
class _aio_callable_context_manager(object):
__slots__ = ()
def __call__(self, fn):
@wraps(fn)
async def inner(*args, **kwargs):
async with self:
return fn(*args, **kwargs)
return inner
# class AioExecutionContext(_aio_callable_context_manager, ExecutionContext):
# def __enter__(self):
# raise NotImplementedError()
# async def __aenter__(self):
# async with self.database._conn_lock:
# self.database.push_execution_context(self)
# self.connection = await self.database._connect(
# self.database.database,
# **self.database.connect_kwargs)
# if self.with_transaction:
# self.txn = self.database.transaction()
# await self.txn.__aenter__()
# return self
# def __exit__(self, exc_type, exc_val, exc_tb):
# raise NotImplementedError()
# async def __aexit__(self, exc_type, exc_val, exc_tb):
# async with self.database._conn_lock:
# if self.connection is None:
# self.database.pop_execution_context()
# else:
# try:
# if self.with_transaction:
# if not exc_type:
# self.txn.commit(False)
# await self.txn.__aexit__(exc_type, exc_val, exc_tb)
# finally:
# self.database.pop_execution_context()
# await self.database._close(self.connection)
# class AioUsing(AioExecutionContext, Using):
# def __enter__(self):
# raise NotImplementedError()
# async def __aenter__(self):
# self._orig = []
# for model in self.models:
# self._orig.append(model._meta.database)
# model._meta.database = self.database
# return super(Using, self).__aenter__()
# def __exit__(self, exc_type, exc_val, exc_tb):
# raise NotImplementedError()
# async def __aexit__(self, exc_type, exc_val, exc_tb):
# await super(Using, self).__aexit__(exc_type, exc_val, exc_tb)
# for i, model in enumerate(self.models):
# model._meta.database = self._orig[i]
class _aio_atomic(_aio_callable_context_manager):
    """Async atomic block: a real transaction at depth 0, a savepoint when
    already inside a transaction."""
    __slots__ = ('conn', 'transaction_type', 'context_manager')

    def __init__(self, conn, transaction_type=None):
        self.conn = conn
        self.transaction_type = transaction_type

    async def __aenter__(self):
        await self.conn.__aenter__()
        if self.conn.transaction_depth() == 0:
            # Outermost level: open a full transaction.
            self.context_manager = self.conn.transaction(self.transaction_type)
        else:
            # Nested level: a savepoint provides atomicity within the
            # already-open transaction.
            self.context_manager = self.conn.savepoint()
        return await self.context_manager.__aenter__()

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Always release the connection context, even if finishing the inner
        # transaction/savepoint raises (previously it would be leaked).
        try:
            await self.context_manager.__aexit__(exc_type, exc_val, exc_tb)
        finally:
            await self.conn.__aexit__(exc_type, exc_val, exc_tb)
class aio_transaction(_aio_callable_context_manager):
    """Async transaction context manager.

    Suspends autocommit for the duration of the block, commits on clean exit
    of the outermost transaction frame, and rolls back on error.
    """
    __slots__ = ('conn', 'autocommit', 'transaction_type')

    def __init__(self, conn, transaction_type=None):
        self.conn = conn
        self.transaction_type = transaction_type

    async def _begin(self):
        # Forward the transaction type only when one was requested.
        begin_args = (self.transaction_type,) if self.transaction_type else ()
        await self.conn.begin(*begin_args)

    async def commit(self, begin=True):
        await self.conn.commit()
        if begin:
            await self._begin()

    async def rollback(self, begin=True):
        await self.conn.rollback()
        if begin:
            await self._begin()

    async def __aenter__(self):
        # Remember autocommit so __aexit__ can restore it.
        self.autocommit = self.conn.autocommit
        self.conn.autocommit = False
        if self.conn.transaction_depth() == 0:
            await self._begin()
        self.conn.push_transaction(self)
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type:
                await self.rollback(False)
            elif self.conn.transaction_depth() == 1:
                # Outermost frame: commit; on failure roll back and re-raise.
                try:
                    await self.commit(False)
                except:
                    await self.rollback(False)
                    raise
        finally:
            self.conn.autocommit = self.autocommit
            self.conn.pop_transaction()
class aio_savepoint(_aio_callable_context_manager):
    """Async savepoint context manager (nested-transaction support).

    Issues SAVEPOINT / RELEASE SAVEPOINT / ROLLBACK TO SAVEPOINT statements
    around the wrapped block, using a uuid-derived savepoint name by default.
    Only the async protocol is supported; the sync __enter__/__exit__ raise.

    NOTE(review): unlike aio_transaction, rollback() takes no `begin`
    parameter — confirm the asymmetry is intentional.
    """
    __slots__ = ('conn', 'sid', 'quoted_sid', 'autocommit')
    def __init__(self, conn, sid=None):
        self.conn = conn
        self.sid = sid or uuid.uuid4().hex  # random name when none supplied
        _compiler = conn.compiler() # TODO: bring the compiler here somehow
        self.quoted_sid = _compiler.quote(self.sid)
    async def _execute(self, query):
        # Savepoint statements must not trigger an implicit commit.
        await self.conn.execute_sql(query, require_commit=False)
    async def _begin(self):
        await self._execute('SAVEPOINT %s;' % self.quoted_sid)
    async def commit(self, begin=True):
        await self._execute('RELEASE SAVEPOINT %s;' % self.quoted_sid)
        if begin:
            await self._begin()
    async def rollback(self):
        await self._execute('ROLLBACK TO SAVEPOINT %s;' % self.quoted_sid)
    def __enter__(self):
        raise NotImplementedError()
    async def __aenter__(self):
        # Suspend autocommit while the savepoint is active; restored in __aexit__.
        self.autocommit = self.conn.get_autocommit()
        self.conn.set_autocommit(False)
        await self._begin()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        raise NotImplementedError()
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type:
                await self.rollback()
            else:
                try:
                    await self.commit(begin=False)
                except:
                    await self.rollback()
                    raise
        finally:
            self.conn.set_autocommit(self.autocommit)
|
print(ord(input()))#ASCII Value |
#changed 9/21/2019
# Learn this:
# 1. Project container & Management in Atom
# Atom has no projects, only folders.
# 2. Committing code changes into github.com from within Atom
# See video -->> https://www.youtube.com/watch?v=HZV7OKoD1Hc
# To get an API token, log into atom.io and click your name at the top
# Need to simply clone from github.com to my Desktop. Don't need a token!
# 3. Master Python import tech well enough to use a separate class for the SQLite connection
# 4. Debugging Python in Atom
import datetime
from WebPg import *
import DB_Classes
#from DB_Classes. import dbConn
from DB_Classes.SQLite3 import KSQLite
# Print the current time, exercise the WebPg.Page1 demo class, then open the
# scrape.db SQLite database through the KSQLite wrapper.
curTime = datetime.datetime.now()
print ("Time is " + str(curTime))
x = Page1(38)
print (x.fox())
print (x.stringLength("seven"))
# Raw strings keep the Windows backslashes literal.
tiger = KSQLite(r'Thomas')
tiger.DBconnect(r'C:\Users\Loseke\Desktop\Code\SQLite_DBs\scrape.db')
|
import os
import sys
validDir = 0
found = False
def rename_file( dir, src, dest ):
    """Recursively rename every file and directory under `dir`, replacing the
    first occurrence of `src` in each name with `dest`.

    Sets the module-level `found` flag to True when at least one name matched.
    """
    def rename_all( root, items ):
        for name in items:
            try:
                if src in name:  # REPLACE WITH SOURCE CHARACTERS
                    global found
                    found = True
                    newName = name.replace(src, dest, 1)  # first occurrence only
                    # Reuse the computed name instead of calling replace() twice.
                    os.rename(os.path.join(root, name), os.path.join(root, newName))
                    print ("Renamed: {0} to {1}".format(name, newName))
            except OSError as e:
                print ("OSError ({0}): {1} - {2}".format(e.errno, e.strerror, name))
    # starts from the bottom so paths further up remain valid after renaming
    for root, dirs, files in os.walk( dir, topdown=False ):
        rename_all(root, dirs)
        rename_all(root, files)
# Prompt-driven entry point: ask for a directory, the substring to replace,
# and its replacement, then rename matching entries recursively.
# raw_input() only exists on Python 2 — bind the correct reader ONCE instead
# of re-checking the interpreter version at every prompt.
read_line = raw_input if sys.version_info.major < 3 else input
while (validDir == 0):
    renamePath = read_line("Enter directory of contents you wish to rename: ")
    if (len(renamePath) == 0):
        exit(0)  # empty input aborts
    elif os.path.isdir(renamePath): # Check for valid Path
        validDir = 1
        origName = read_line("Enter name or string to replace: ")
        newName = read_line("Enter desired name or string: ")
        rename_file(renamePath, origName, newName)
        if (found == False):
            print ("Nothing found matching: " + origName)
    else:
        print ("Error: Invalid Path")
        print ("Example: C:\\DirToRename\\ or //home//user//folder//")
import os
# Python 2 script: for each of 10 TTE PHA versions, build a grppha command
# (fed via an echo | grppha shell pipeline) that sets the RESPFILE/BACKFILE
# keywords and writes a *_pgstat.pha1 output, then run it.
for i in range(0,10):
    grp_cmd = """echo \""""+"""glg_tte_b1_bn171010792_v0"""+str(i)+""".pha1\n""" \
    """!"""+"""glg_tte_b1_bn171010792_v"""+str(i)+"""_pgstat.pha1\n"""\
    + """chkey RESPFILE """+"""glg_cspec_b1_bn171010792_v02.rsp2{3}\n""" \
    + """chkey BACKFILE """+"""glg_tte_b1_bn171010792_v0"""+str(i)+""".bak\n""" \
    +"""exit\n\" | grppha"""
    print grp_cmd
    os.system(grp_cmd)
|
import csv
import re
import sys
import pandas as pd
import numpy as np
import collections
"""
family id
"""
if __name__ == '__main__':
    # Reduce the 'Cabin' column to its deck letter (first character), or
    # 'N/A' when the value is missing (NaN), for both test and train splits.
    data = pd.read_csv('data/test_5.csv')
    data1 = pd.read_csv('data/train_5.csv')
    for index, row in data.iterrows():
        # `.ix` was removed in pandas 1.0; `.loc` is the supported
        # label-based accessor with identical semantics here.
        if isinstance(row['Cabin'], str):
            data.loc[index, 'Cabin'] = str(row['Cabin'][0])
        else:
            data.loc[index, 'Cabin'] = str('N/A')
    for index1, row1 in data1.iterrows():
        if isinstance(row1['Cabin'], str):
            data1.loc[index1, 'Cabin'] = str(row1['Cabin'][0])
        else:
            data1.loc[index1, 'Cabin'] = str('N/A')
    data.to_csv('data/test_6.csv', mode='w', index=False)
    data1.to_csv('data/train_6.csv', mode='w', index=False)
import threading

threads = []
max_threads = 100
# NOTE(review): `csv_reader` and `insert_row` are not defined in this file —
# presumably supplied by surrounding code; confirm before running standalone.
while threads or csv_reader:
    # Reap finished workers.  Rebuild the list instead of calling
    # threads.remove() while iterating `threads`, which skips elements.
    threads = [t for t in threads if t.is_alive()]
    # Top the pool back up to max_threads while work remains.
    while len(threads) < max_threads and csv_reader:
        thread = threading.Thread(target=insert_row, args=("sub",))
        thread.daemon = True  # setDaemon() is deprecated since Python 3.10
        thread.start()
        threads.append(thread)
'''
单向链表反转:
使用三个指针 p q r
p 指向链表的开头 r = q
q 是 p的前一个节点
q = p
p向前移一位
p = p.next
q 连接到 r
q.next = r
'''
# include <stdio.h>
# include <stdlib.h>
import sys
class employee:
    """Singly-linked-list node holding one employee record
    (id number, salary, name, and a `next` pointer)."""
    def __init__(self):
        # Default-initialize an empty, unlinked node.
        self.num, self.salary = 0, 0
        self.name = ''
        self.next = None
findword = 0
# Parallel sample data: names plus [id, salary] pairs, one per node.
namedata = ['Allen', 'Scott', 'Marry', 'Jon', \
            'Mark', 'Ricky', 'Lisa', 'Jasica', \
            'Hanson', 'Amy', 'Bob', 'Jack']
data = [[1001, 32367], [1002, 24388], [1003, 27556], [1007, 31299], \
        [1012, 42660], [1014, 25676], [1018, 44145], [1043, 52182], \
        [1031, 32769], [1037, 21100], [1041, 32196], [1046, 25776]]
head = employee()  # build the list head
# NOTE(review): a fresh instance is always truthy, so this error branch can
# never fire (kept from a C-style malloc check in the original).
if not head:
    print('Error!! 内存分配失败!!')
    sys.exit(0)
head.num = data[0][0]
head.name = namedata[0]
head.salary = data[0][1]
head.next = None
ptr = head
for i in range(1, 12):  # build the rest of the linked list
    newnode = employee()
    newnode.num = data[i][0]
    newnode.name = namedata[i]
    newnode.salary = data[i][1]
    newnode.next = None
    ptr.next = newnode
    ptr = ptr.next
ptr = head
i = 0
print('反转前的员工链表节点数据:')
while ptr != None:  # print the list data
    print('[%2d %6s %3d] => ' % (ptr.num, ptr.name, ptr.salary), end='')
    i = i + 1
    if i >= 3:  # three entries per output row
        print()
        i = 0
    ptr = ptr.next
ptr = head
before = None
print('\n反转后的链表节点数据:')
# In-place reversal using three pointers (last / before / ptr).
while ptr != None:
    last = before
    before = ptr
    ptr = ptr.next
    before.next = last
ptr = before  # `before` is now the head of the reversed list
while ptr != None:
    print('[%2d %6s %3d] => ' % (ptr.num, ptr.name, ptr.salary), end='')
    i = i + 1
    if i >= 3:
        print()
        i = 0
    ptr = ptr.next
|
import os
import json

# Load the application configuration that sits next to this module, so the
# import works regardless of the current working directory.
basedir = os.path.abspath(os.path.dirname(__file__))
with open(basedir + "/dev.json") as json_file:
    CONF = json.load(json_file)
API_NAME = CONF["application.name"]
SECRET_KEY = CONF["auth.config"]["secret_key"]
UUID_LEN = CONF["auth.config"]["uuid_len"]
UUID_ALPHABET = ''.join(map(chr, range(48, 58)))  # the digit characters '0'..'9'
TOKEN_EXPIRES = CONF["auth.config"]["token_expires"]
DATABASE = CONF["database.config"]
DB_CONFIG = (DATABASE["user"], DATABASE["password"], DATABASE["host"], DATABASE["database"])
DATABASE_URL = "postgresql+psycopg2://%s:%s@%s/%s" % DB_CONFIG
# `== "yes"` already yields a bool; the `True if ... else False` wrappers
# around these comparisons were redundant.
DB_ECHO = DATABASE['echo'] == "yes"
DB_AUTOCOMMIT = DATABASE['autocommit'] == "yes"
DB_POOL_RECYCLE = DATABASE["pool_recycle"]
DB_POOL_SIZE = DATABASE["pool_size"]
DB_POOL_TIMEOUT = DATABASE["pool_timeout"]
DB_POOL_MAX_OVERFLOW = DATABASE["max_overflow"]
LOG_LEVEL = CONF['log.config']['level']
# -*- coding:utf-8 -*-
'''
求可装入背包的物品的最大总和
'''
import numpy as np
def max_weight_in_bag(goods_weight, max_weight):
    """0/1-knapsack by weight: return the largest total weight <= max_weight
    achievable by choosing a subset of `goods_weight`.

    Uses the classic reachable-sum DP: states[i][j] == 1 means some subset of
    the first i+1 items sums to exactly j.
    """
    goods_num = len(goods_weight)
    states = np.zeros((goods_num, max_weight + 1))
    states[0][0] = 1  # taking nothing is always reachable
    # Off-by-one fix: an item weighing exactly max_weight must also mark its
    # state (was `<`, inconsistent with the `+ 1` inclusive ranges below).
    if goods_weight[0] <= max_weight:
        states[0][goods_weight[0]] = 1
    for i in range(1, goods_num):
        # Case 1: skip item i — reachable sums carry over unchanged.
        for j in range(max_weight + 1):
            if states[i-1][j] == 1:
                states[i][j] = 1
        # Case 2: take item i — shift each reachable sum up by its weight.
        for j in range(max_weight - goods_weight[i] + 1):
            if states[i-1][j] == 1:
                states[i][j + goods_weight[i]] = 1
    # Scan downward for the largest reachable sum (0 is always reachable).
    cols = max_weight
    while cols >= 0:
        if states[goods_num-1][cols] == 1:
            return cols
        cols -= 1
goods_num = 5  # total number of items (NOTE(review): goods_weight only has 4!)
goods_weight = [2,2,6,2]  # weight of each item
max_weight = 9  # bag capacity
num = max_weight_in_bag(goods_weight, max_weight)
print('max weight:',num)
exit()
# ---- everything below is dead code (after exit()) — an inline copy of the
# same DP kept for reference.  With goods_num=5 but only 4 weights it would
# raise IndexError if it ever ran.
states = np.zeros((5,10))
states[0][0] = 1  # row = item index, column = bag weight, 1 marks reachable
if goods_weight[0] < max_weight:
    states[0][goods_weight[0]] = 1  # mark taking item 0
for i in range(1,goods_num):  # process items starting from item 1
    for j in range(max_weight+1):  # skip item i: carry previous sums over
        if states[i-1][j] == 1:
            states[i][j] = states[i-1][j]
    for j in range(max_weight - goods_weight[i] + 1):  # take item i
        if states[i-1][j] == 1:
            states[i][j+goods_weight[i]] = 1
print(states)
cols = max_weight
while cols >= 0:
    if states[goods_num-1][cols] == 1:
        print('当前物品最多装:', cols)
        exit()
    cols -= 1
print('None')
|
import unittest
import mock
from harrison.util.inspectlib import stack_frame_info
def stack_frame_test_func(stacklevel):
    # Helper one call-frame deep, so tests can probe stack_frame_info at
    # known relative stack levels (1 = this frame, 2 = the caller's frame).
    return stack_frame_info(stacklevel)
class TestStackFrame(unittest.TestCase):
    """Tests for harrison.util.inspectlib.stack_frame_info.

    NOTE(review): assertRegexpMatches is the Python-2-era alias (renamed to
    assertRegex); this module appears to target older Python.
    """
    def test_stack_frame_info(self):
        # stacklevel=1: the frame inside stack_frame_test_func itself.
        there = stack_frame_test_func(1)
        # Argh, might be .pyc, or might be .py.
        self.assertRegexpMatches(there.filename, 'test_inspectlib.pyc?')
        self.assertEqual(there.module_name, __name__)
        self.assertEqual(there.function_name, 'stack_frame_test_func')
        # stacklevel=2: one frame up, i.e. this test method.
        here = stack_frame_test_func(2)
        # Argh, might be .pyc, or might be .py.
        self.assertRegexpMatches(here.filename, 'test_inspectlib.pyc?')
        self.assertEqual(here.module_name, __name__)
        self.assertEqual(here.function_name, 'test_stack_frame_info')
        # The helper is defined above this method in the file.
        self.assertLess(there.line_number, here.line_number)
        self.assertRegexpMatches(
            here.pretty,
            r'test_inspectlib.py:\d+ in \S*test_inspectlib.test_stack_frame_info$'
        )
        # stacklevel must be >= 1.
        with self.assertRaises(ValueError):
            stack_frame_test_func(0)
    @mock.patch('inspect.getmodule')
    def test_stack_frame_info_works_when_module_can_not_be_identified(self, mock_getmodule):
        # inspect.getmodule may return None (e.g. exec'd code); must not crash.
        mock_getmodule.return_value = None
        stack_frame_test_func(1)
|
#!/usr/bin/env python
import sys
from helpers import dissect_file, interval_tree_to_hierarchy
def print_recur(hnode, depth=0):
    """Pretty-print a hierarchy node and its children, depth-first.

    Each line shows the half-open byte interval, an indent proportional to
    depth, the span length (decimal when < 16, hex otherwise), the node type,
    and its comment.  Children are printed in order of their start offset.
    """
    span = hnode.end - hnode.begin
    if span < 16:
        span_str = '%d' % span
    else:
        span_str = '0x%X' % span
    print('[%08X, %08X) %s(len=%s type=%s) %s'
          % (hnode.begin, hnode.end, depth*'  ', span_str, hnode.type_, hnode.comment))
    for kid in sorted(hnode.children, key=lambda c: c.begin):
        print_recur(kid, depth + 1)
if __name__ == '__main__':
    # Usage: <script> <file> — dissect the file into an interval tree,
    # convert it to a parent/child hierarchy, and pretty-print it.
    tree = dissect_file(sys.argv[1])
    root = interval_tree_to_hierarchy(tree)
    print_recur(root)
|
# Learning heuristic
import math as m
import time
import cvrp.const as const
import cvrp.ReadWrite as rw
import cvrp.utile as utile
import cvrp.learning as learn
import cvrp.route as route
import cvrp.linKernighan as LK
import cvrp.ejectionChain as EC
import cvrp.crossExchange as CE
import cvrp.ClarkeWright as CW
import cvrp.optimisation as opt
def learning_heuristic():
    """Run the CVRP learning heuristic: an initial Clarke-Wright + learning
    phase, then NbIterations rounds of optimisation that re-learn promising
    edges from the best solutions found, finally writing the 10 best
    solutions and timing stats to the instance's result file."""
    # compute global variables
    instance, demand = const.instance, const.demand
    namefile = const.namefile
    costs = 0
    all_sol = []
    fixed_edges = []
    BaseSolution = []
    tps_deb = time.time()
    # learning
    initial = CW.init_routes()
    edges, param = learn.learning_results(
        0.7, 2, 50, initial, const.typeBase, const.percent, const.learningCriterion)
    # Rebuild complete routes from the learned edge set.
    initial_routes = route.complete(utile.destruction(
        utile.ignore_0(edges)))
    tps_learn = time.time()
    rw.writef(namefile, 'Learning time = ' + str(tps_learn-tps_deb))
    # start
    cpt = 0
    for i in range(const.NbIterations):
        print(i)
        edges = []
        (lam, mu, nu) = param[0]  # best tuple of the learning phase
        BaseSolution = opt.optimisation_heuristic(
            route.copy_sol(initial_routes), lam, mu, nu, fixed_edges)
        all_sol += BaseSolution
        # conserve best and worst costs
        stat = [BaseSolution[0][0], BaseSolution[-1][0]]
        # New learning phase
        # Quality threshold: 10% of the way from the best to the worst cost.
        quality = (stat[1]-stat[0])/10 + stat[0]
        # Learning criterion decays each round until it hits the lower bound,
        # then resets.
        crit = max(const.upBound-cpt/10, const.lowBound)
        cpt += 1
        if crit == const.lowBound:
            cpt = 0
        ls_qual = learn.learning_set_quality(BaseSolution, quality)
        mat_qual = learn.init_matrix(len(instance))
        mat_qual = learn.learn(mat_qual, ls_qual)
        e_qual = learn.mat_info_rg(int(len(demand)*crit), mat_qual)
        initial_routes = route.complete(utile.destruction(
            utile.ignore_0(e_qual)))
    # write results in a file
    all_sol.sort()
    tps_fin = time.time()
    print(tps_fin-tps_deb)
    costs = 0
    # Report the 10 best solutions (all_sol is sorted by cost).
    for i in range(10):
        c_sol, sol = all_sol[i]
        costs += c_sol
        rw.writef(namefile, '')
        rw.writef(namefile, 'res = ' + str(round(c_sol, 3)))
        rw.writef(namefile, 'res_int = ' +
                  str(round(route.cost_sol(sol, "Int"))))
        rw.writef(namefile, 'solution = ' + str(sol))
    rw.writef(namefile, '')
    rw.writef(namefile, 'Mean = ' + str(costs/10))
    rw.writef(namefile, 'Execution = ' + str(tps_fin-tps_deb))
    rw.writef(namefile, '')
|
import xml.etree.ElementTree as ET

# Build a TSV of English sentences (discourse connective wrapped in <b>)
# paired with their French translations, from the aligned XML in merged.xml.
root = ET.parse('merged.xml').getroot()
with open('/home/feasinde/Git/COMP490/dataset/dataset_en.tsv', 'w') as data_tsv:
    data_tsv.write('en\tfr\n')
    for speaker in root:
        for chunk in speaker:
            # BUGFIX: Element.getchildren() was deprecated and removed in
            # Python 3.9; list(chunk) is the supported equivalent.
            if list(chunk):
                connectives = [x.text for x in chunk[0]]
                sentence = [x for x in chunk[0].itertext()]
                fr_sentence = ''.join(chunk[1].itertext()) + '\n'
                for connective in connectives:
                    mod_sentence = list(sentence)
                    # NOTE(review): raises ValueError if the connective text is
                    # not an exact itertext token -- confirm against the data.
                    index_of_connective = mod_sentence.index(connective)
                    mod_sentence[index_of_connective] = "<b>"+mod_sentence[index_of_connective]+"</b>"
                    data_tsv.write(''.join(mod_sentence) +'\t'+fr_sentence)
|
from dao.nutzer_dao import NutzerDao
from model.nutzer_model import NutzerKassenModel
from PyQt5.QtCore import *
from dao.kassen_dao import KassenDao
from dao.historie_dao import HistorieDao
import locale
import logging
from datetime import datetime
class KassenController(QObject):
    """Qt controller mediating between the user/till models and the DAOs."""

    # emitted with the formatted till balance whenever it should be redisplayed
    kasseAktualisieren = pyqtSignal(str)

    def __init__(self, nk_model, nutzermodel):
        QObject.__init__(self)
        self._nk_model = nk_model
        self._nutzermodel = nutzermodel
        self._nutzerdao = NutzerDao()
        self._kassendao = KassenDao()
        self._historiedao = HistorieDao()

    @pyqtSlot()
    def getUsers(self):
        """Reload all users from the database into the model."""
        user_data = self._nutzerdao.select_users()  # fetch user rows from the DB
        # clear the model before repopulating it
        if self._nk_model.rowCount() != 0:
            while self._nk_model.rowCount() != 0:
                self._nk_model.deleteNutzer(0)
        for i in range(len(user_data)):
            self._nk_model.addNutzer(user_data[i]['Name'], user_data[i]['Konto'], user_data[i]['Verein'], f"src/{user_data[i]['Bild']}")

    @pyqtSlot(str, str, str, str)
    def einzahlen(self, name, geld, konto, kasse):
        """Deposit `geld` for `name`: update account, till and history.

        `geld`/`kasse` arrive as display strings (e.g. "1,50€") and are
        normalised to float-parsable form first.
        """
        if '€' in geld:
            geld = geld.replace('€', '')
        if ',' in geld:
            geld = geld.replace(',', '.')
        if '€' in kasse:
            kasse = kasse.replace('€', '')
        if ',' in kasse:
            kasse = kasse.replace(',', '.')
        now = datetime.now()
        date = now.strftime("%d/%m/%Y %H:%M:%S")
        self._historiedao.create_content(date, 'Einzahlung', name, geld)
        #logging.warning(f'Einzahlung von: {name} Betrag: {geld}')
        neuKasse = "{:10.2f}".format(float(kasse) + float(geld))
        konto = konto.replace('Kontostand:', '')
        neuKonto = "{:10.2f}".format(float(konto) + float(geld))
        name = name.replace(' ', '')
        self._nutzerdao.transaction(name, neuKonto)
        self._kassendao.edit_geld(neuKasse)

    @pyqtSlot()
    def getKasse(self):
        """Emit the current till balance formatted for display."""
        geld = self._kassendao.select_geld()
        geld = "{:.2f}€".format(geld)
        self.kasseAktualisieren.emit(geld)

    @pyqtSlot(str, str, str, int)
    def abrechnen(self, verwendung, geld, name, currentIndex):
        """Settle an expense of `geld` against the till or a user account."""
        if '€' in geld:
            geld = geld.replace('€', '')
        if ',' in geld:
            geld = geld.replace(',', '.')
        now = datetime.now()
        date = now.strftime("%d/%m/%Y %H:%M:%S")
        self._historiedao.create_content(date, f'Kassenabrechnung Verwendung: {verwendung}', name, geld)
        #logging.warning(f'Kassenabrechnung Verwendung: {verwendung} von: {name} Betrag: {geld}')
        # BUGFIX: was `name in 'Kasse'`, a substring test that also matched
        # e.g. 'asse' or the empty string; an exact comparison is intended.
        if name == 'Kasse':
            kasse = self._kassendao.select_geld()
            kasse = "{:10.2f}".format(float(kasse) - float(geld))
            self._kassendao.edit_geld(kasse)
        else:
            konto = self._nutzermodel._konto[currentIndex]
            # NOTE(review): the amount is ADDED to the account here while the
            # till branch subtracts -- confirm this sign is intentional.
            konto = "{:10.2f}".format(float(konto) + float(geld))
            bild = self._nutzermodel._bild[currentIndex]
            mitglied = self._nutzermodel._mitglied[currentIndex]
            self._nutzerdao.edit_user(name, bild, mitglied, float(konto))
|
import re
class thSlurp:
    """Line-oriented dispatcher: feeds each line of a file to the callbacks
    whose regex pattern matches it."""

    def __init__(self):
        self.triggers = []   # list of (pattern, callback) pairs
        self.flagged = None  # index of the trigger promoted to match-all, if any

    def registerTrigger(self, patt='', cback=''):
        """Add a (pattern, callback) pair; returns False on missing arguments."""
        if patt == '' or cback == '':
            return False
        self.triggers.append((patt, cback))
        return True

    def unregisterTrigger(self, patt='', cback=''):
        """Remove the first trigger whose pattern equals `patt`."""
        if patt == '' or cback == '':
            return False
        for i in range(len(self.triggers)):
            if self.triggers[i][0] == patt:
                self.triggers.pop(i)
                return True

    def setFlag(self, index=None):
        """Promote trigger `index` to fire on every line ('^.*')."""
        if index is None:
            return False
        self.registerTrigger('^.*', self.triggers[index][1])
        if self.flagged is None:
            self.flagged = index
        else:
            return False
        return True

    def unsetFlag(self, index=None):
        """Undo setFlag for trigger `index`."""
        if index is None:
            return False
        if self.flagged is None:
            return False
        self.flagged = None
        self.unregisterTrigger('^.*', self.triggers[index][1])
        return True

    def getIndex(self, pattern=None):
        """Return the index of the first trigger with `pattern`, else None."""
        if pattern is None:
            return None
        # BUGFIX: the loop previously ran to len(self.triggers) - 1 and could
        # therefore never find the last registered trigger.
        for i in range(len(self.triggers)):
            if self.triggers[i][0] == pattern:
                return i
        return None

    def process(self, fp=None):
        """Read `fp` line by line, invoking every matching trigger callback
        with (stripped_line, trigger_index)."""
        if fp is not None:
            bf = fp.readline()
            while bf != '':
                for trig in self.triggers:
                    if re.match(trig[0], bf.strip()):
                        trig[1](bf.strip(), self.getIndex(trig[0]))
                bf = fp.readline()
class thConfig:
    """Tiny 'section { key = value }' config-file parser.

    The whole file is parsed once at construction; values are plain strings.
    """

    def __init__(self, file=''):
        self.file = file
        self.fp = open(file, 'r')
        self.slurp()
        self.fp.close()

    def slurp(self):
        """Parse self.fp into the nested dict self.conf."""
        self.conf = {}
        section = ''
        data = self.fp.readlines()
        for line in data:
            # skip blanks and '#' comment lines
            if line.strip() != '' and line[0] != '#':
                if re.match("^.*{", line) != None:
                    self.conf[line.split()[0]] = {}
                    section = line.split()[0].strip()
                elif re.match("^.*=.*", line) != None and section != '':
                    self.conf[section][line.split('=')[0].strip()] = line.split('=')[1].strip()
                elif re.match("^.}", line) != None:
                    # NOTE(review): this pattern needs a character before '}',
                    # so a line containing only '}' does NOT close the section
                    # -- confirm whether that is intended.
                    section = ''
        return True

    def lookup(self, sect=None, key=None):
        """Return conf[sect][key], or False when absent / arguments missing."""
        if sect == None or key == None:
            return False
        try:
            return self.conf[sect][key]
        except KeyError:
            return False

    def set(self, sect=None, key=None, val=None):
        """Set conf[sect][key] = val, creating the section if needed."""
        if sect == None or key == None or val == None:
            return False
        # BUGFIX: `if not self.conf[sect]` raised KeyError for an unknown
        # section; test membership instead so new sections are created.
        if sect not in self.conf:
            self.conf[sect] = {}
        self.conf[sect][key] = val
|
#!/usr/bin/python
# Libraries
from PIL import Image, ImageTk
from math import sqrt, floor, ceil, pi, sin, cos, tan, atan, atan2, radians, degrees
from random import random, randint, choice
import numpy
from time import time
from sys import maxint
# ======= solid colors =======
# RGB tuples used throughout the filters and detectors below.
WHITE = (255,255,255)
BLACK = (0,0,0)
RED = (255,0,0)
GREEN = (0,255,0)
BLUE = (0,0,255)
YELLOW = (255,255,0)
GRAY = (128,128,128)
# ============================
def percentage(x, y):
    """Return y as a percentage of x, rounded to two decimal places."""
    ratio = (y * 100.0) / x
    return round(float(ratio), 2)
def slicing(l, n):
    """Split sequence `l` into consecutive chunks of length `n` (the last
    chunk may be shorter).

    BUGFIX/portability: uses `range` instead of the Python-2-only `xrange`;
    iteration behaviour is identical under Python 2.
    """
    return [l[a:a+n] for a in range(0, len(l), n)]
def de_slicing(p):
    """Flatten a list of row-lists back into one flat pixel list."""
    flat = []
    for row in p:
        flat.extend(row)
    return flat
def array2list(a):
    """Convert an HxWxC numpy array into a flat, row-major list of int
    channel tuples.

    BUGFIX/portability: uses `range` instead of the Python-2-only `xrange`;
    iteration behaviour is identical under Python 2.
    """
    rows, cols = a.shape[0], a.shape[1]
    return [tuple(int(v) for v in a[y, x])
            for y in range(rows)
            for x in range(cols)]
def turn(p1, p2, p3):
    """Orientation of the turn p1 -> p2 -> p3 via the 2-D cross product:
    1 = left (counter-clockwise), -1 = right (clockwise), 0 = collinear.

    BUGFIX/portability: the sign is computed explicitly instead of with the
    Python-2-only builtin `cmp`; identical results under Python 2.
    """
    cross = (p2[0] - p1[0])*(p3[1] - p1[1]) - (p3[0] - p1[0])*(p2[1] - p1[1])
    return (cross > 0) - (cross < 0)

TURN_LEFT, TURN_RIGHT, TURN_NONE = (1, -1, 0)
def graham_scan(points):
    """Convex hull of 2-D points using two monotone chains built with the
    turn() predicate; returns the hull vertices as one list.

    NOTE: sorts (and therefore mutates) the caller's `points` list.
    """
    u = list()  # upper chain
    l = list()  # lower chain
    points.sort()  # lexicographic order
    for p in points:
        # pop points that would break the chain's turning direction
        while len(u) > 1 and turn(u[-2], u[-1], p) <= 0:
            u.pop()
        while len(l) > 1 and turn(l[-2], l[-1], p) >= 0:
            l.pop()
        u.append(p)
        l.append(p)
    # drop the duplicated endpoints of the lower chain and stitch the chains
    l = l[1:-1][::-1]
    l += u
    return l
def getNeighbours(pixels, y, x):
    """Return the values of the up-to-8 neighbours of pixels[y][x].

    BUGFIX: the original relied on catching IndexError, but negative indices
    (y-1 or x-1 at the top/left border) silently wrapped around to the
    opposite edge instead of raising, so border pixels picked up wrong
    neighbours.  Bounds are now checked explicitly; the neighbour order of
    the original (row above, row below, left, right) is preserved.
    """
    height = len(pixels)
    neighbours = []
    for dy, dx in ((-1, -1), (-1, 0), (-1, 1),
                   (1, -1), (1, 0), (1, 1),
                   (0, -1), (0, 1)):
        ny, nx = y + dy, x + dx
        if 0 <= ny < height and 0 <= nx < len(pixels[ny]):
            neighbours.append(pixels[ny][nx])
    return neighbours
def normalize(pixels):
    """Rescale each channel of every pixel by that channel's min/max taken
    across the whole pixel list.

    NOTE(review): under Python 2, `(p-m)/d` with int pixels is floor
    division, so results collapse to 0 or 1 -- confirm whether float output
    was intended.  Raises ZeroDivisionError if a channel is constant.
    """
    newPixels = list()
    maximum = map(max, zip(*pixels))  # per-channel maxima
    minimum = map(min, zip(*pixels))  # per-channel minima
    div = tuple([a-b for a,b in zip(maximum, minimum)])  # per-channel range
    for pixel in pixels:
        newPixels.append(tuple([(p-m)/d for p,m,d in zip(pixel, minimum, div)]))
    return newPixels
def euclidean(values, shape):
    """Combine several gradient arrays into one by the per-pixel, per-channel
    Euclidean norm: out[y,x] = floor(sqrt(sum of squares))."""
    newValues = numpy.zeros(shape=shape)
    for y in xrange(shape[0]):
        for x in xrange(shape[1]):
            # numpy adds the per-channel squares element-wise
            pixel = sum([(value[y,x]**2) for value in values])
            pixel = tuple([int(floor(sqrt(p))) for p in pixel])
            newValues[y,x] = pixel
    return newValues
def grayscale(pixels, lmin=0, lmax=255):
    """Convert pixels to grayscale in place, with threshold clamping.

    Each pixel becomes the integer mean of its channels; means >= lmax
    saturate to 255, and the (possibly saturated) value drops to 0 when
    it is <= lmin.  Returns the same, mutated list.
    """
    for idx, pixel in enumerate(pixels):
        level = sum(pixel) // 3
        if level >= lmax:
            level = 255
        if level <= lmin:
            level = 0
        pixels[idx] = (level, level, level)
    return pixels
def blurPixel(pixels):
    """Average a list of RGB tuples channel-wise (integer mean)."""
    count = len(pixels)
    totals = [0, 0, 0]
    for pixel in pixels:
        totals[0] += pixel[0]
        totals[1] += pixel[1]
        totals[2] += pixel[2]
    return (totals[0] // count, totals[1] // count, totals[2] // count)
def blur(pixels, width, height):
    """Box-blur a flat pixel list: each output pixel is the mean of itself
    and its 8-neighbourhood.  Prints row progress as a percentage."""
    newPixels = list()
    pixels = slicing(pixels, width)  # reshape the flat list into rows
    for y, pLine in enumerate(pixels):
        print str(percentage(height,y))
        for x, pixel in enumerate(pLine):
            pNeighbours = getNeighbours(pixels, y, x)
            pNeighbours.append(pixel)  # include the centre pixel in the mean
            newPixel = blurPixel(pNeighbours)
            newPixels.append(newPixel)
    return newPixels
def negative(pixels, cMax=255):
    """Invert every channel of every pixel in place (cMax - value)."""
    for idx, pixel in enumerate(pixels):
        pixels[idx] = tuple(cMax - channel for channel in pixel)
    return pixels
def sepia(pixels):
    """Apply a sepia tone: grayscale first, then damp each channel by a
    fixed divisor (R unchanged, G / 1.2, B / 2.0).  Mutates and returns
    the pixel list."""
    pixels = grayscale(pixels)
    divisors = (1.0, 1.2, 2.0)
    for idx, pixel in enumerate(pixels):
        pixels[idx] = tuple(int(channel / d) for channel, d in zip(pixel, divisors))
    return pixels
def noise(pixels, level, intensity):
    """Add salt-and-pepper noise in place: with probability level/100 a
    pixel becomes near-black (0 + 13*intensity) or near-white
    (255 - 13*intensity), chosen with equal probability."""
    probability = level * 0.01
    offset = intensity * 13
    for idx in range(len(pixels)):
        if random() < probability:
            shade = (0 + offset) if random() < 0.5 else (255 - offset)
            pixels[idx] = (shade, shade, shade)
    return pixels
def removeNoise(pixels, width, height, aggressiveness):
    """Despeckle: replace a pixel with the mean of its neighbours when it
    differs from that mean by more than 10*aggressiveness on every channel.
    Prints row progress."""
    aggressiveness *= 10
    newPixels = list()
    pixels = slicing(pixels, width)  # rows
    for y, pLine in enumerate(pixels):
        print str(percentage(height,y))
        for x, pixel in enumerate(pLine):
            pNeighbours = getNeighbours(pixels, y, x)
            newPixel = blurPixel(pNeighbours)  # neighbourhood mean (centre excluded)
            a1 = abs(newPixel[0] - pixel[0])
            a2 = abs(newPixel[1] - pixel[1])
            a3 = abs(newPixel[2] - pixel[2])
            if(a1>aggressiveness and a2>aggressiveness and a3>aggressiveness):
                newPixels.append(newPixel)
            else:
                newPixels.append(pixel)
    return newPixels
def difference(pixels, width, height):
    """Difference-of-blur edge measure: grayscale minus its blurred version,
    then the grayscale(lmin=10, lmax=10) call thresholds pixels hard to
    0 or 255.

    NOTE(review): grayscale() mutates `pixels` in place, so pixelsOr aliases
    the input list -- confirm callers do not rely on the original values.
    """
    pixelsOr = grayscale(pixels)
    pixelsBG = blur(pixelsOr, width, height)
    newPixels = list()
    for a, pixel in enumerate(pixelsOr):
        # per-channel difference; may be negative before thresholding
        newPixel = tuple([p1-p2 for p1,p2 in zip(pixelsOr[a], pixelsBG[a])])
        newPixels.append(newPixel)
    return grayscale(newPixels, lmin=10, lmax=10)
def convolution2D(f,h):
    """Naive 2-D convolution of image array f with kernel h, per channel,
    printing row progress.

    Out-of-range kernel taps are skipped via IndexError.  NOTE(review):
    negative indices wrap around instead of raising, so the top/left border
    samples the opposite edge; and i2 is offset by hS[0] rather than hS[1]
    (harmless for the square 3x3 kernels used in this file) -- confirm.
    """
    fS, hS = f.shape, h.shape
    F = numpy.zeros(shape=fS)
    for y in xrange(fS[0]):
        print str(percentage(fS[0],y))
        for x in xrange(fS[1]):
            mSum = numpy.array([0.0, 0.0, 0.0])
            for j in xrange(hS[0]):
                j2 = j-(hS[0]/2)  # kernel offset relative to its centre
                for i in xrange(hS[1]):
                    i2 = i-(hS[0]/2)
                    try:
                        mSum += f[y+j2,x+i2]*h[j,i]
                    except IndexError: pass
            F[y,x] = mSum
    return F
def applyMask(pixels, width, gray=True):
    """Convolve the image with a fixed Laplacian-like 3x3 mask (optionally
    grayscaling first) and return the result as a flat pixel list."""
    if(gray): pixels = grayscale(pixels)
    pixels = slicing(pixels, width)
    pixels = numpy.array(pixels)
    pS = pixels.shape  # kept for parity with sibling functions (unused)
    n = 1.0/1.0  # mask scale factor
    mask = numpy.array([[0.0, 1.0, 0.0],[1.0, -6.0, 1.0],[0.0, 1.0, 0.0]]) * n
    newPixels = array2list(convolution2D(pixels, mask))
    return newPixels
def borderDetection(pixels, width):
    """Sobel edge detection: convolve the grayscaled image with horizontal
    and vertical Sobel kernels and combine the two gradients with the
    per-pixel Euclidean norm."""
    #start = time()
    pixels = grayscale(pixels)
    pixels = slicing(pixels, width)
    pixels = numpy.array(pixels)
    pS = pixels.shape
    n = 1.0/1.0
    mask1 = numpy.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) * n  # horizontal gradient
    mask2 = numpy.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) * n  # vertical gradient
    g1 = convolution2D(pixels, mask1)
    g2 = convolution2D(pixels, mask2)
    newPixels = grayscale(array2list(euclidean([g1,g2], pS)))
    #end = time()
    #print (end - start)
    return newPixels
def bfs(pixels, visited, coordinates, newColor, width, height):
    """Breadth-first flood fill from `coordinates`: recolour the 8-connected
    region sharing the start pixel's colour to newColor.

    `pixels` (list of rows) and `visited` are mutated in place.  Returns
    (pixels, visited, massPixels) where massPixels lists the (x, y)
    coordinates of the filled region.
    """
    queue = [coordinates]
    original = pixels[coordinates[1]][coordinates[0]]  # colour being replaced
    massPixels = list()
    while(len(queue) > 0):
        (x,y) = queue.pop(0)
        pColor = pixels[y][x]
        if(pColor == original or pColor == newColor):
            for dy in [-1,0,1]:
                for dx in [-1,0,1]:
                    (i,j) = (x + dx, y + dy)
                    if(i >= 0 and i < width and j >= 0 and j < height):
                        contenido = pixels[j][i]
                        if(contenido == original):
                            # recolour before enqueueing so it is not revisited
                            pixels[j][i] = newColor
                            queue.append((i,j))
                            visited[j][i] = 1
                            massPixels.append((i,j))
    return pixels, visited, massPixels
def objectDetection(pixels, width, height, coordinates):
    """Flood-fill the object at `coordinates` with a random colour and
    return (flat pixel list, visited matrix, object pixel coordinates)."""
    pixels = slicing(pixels, width)
    visited = [[0 for b in xrange(width)] for a in xrange(height)]
    color = (randint(0,255), randint(0,255), randint(0,255))
    pixels, visited, objPixels = bfs(pixels, visited, coordinates, color, width, height)
    return de_slicing(pixels), visited, objPixels
def objectClassification(pixels, width, height, color=BLACK):
    """Label all connected components of the given colour via repeated
    flood-fill, keeping those that cover more than 0.1% of the image.

    The biggest kept object is repainted GRAY.  Returns (flat pixels,
    objects) where each object dict holds id/size/percentage/center/
    pixels/dp (detection point).
    """
    pixels = slicing(pixels, width)
    visited = [[0 for b in xrange(width)] for a in xrange(height)]
    objects = list()
    objID = 1
    for y in xrange(height):
        print str(percentage(height,y))
        for x in xrange(width):
            if(not visited[y][x] and pixels[y][x] == color):
                detectionPoint = (x,y)
                objColor = (randint(0,255), randint(0,255), randint(0,255))
                pixels, visited, objPixels = bfs(pixels, visited, (x,y), objColor, width, height)
                objSize = len(objPixels)
                objPrcnt = percentage(width*height, objSize)
                if(objPrcnt > 0.1):
                    # NOTE(review): objPixels holds (x, y) pairs, so ySum here
                    # actually sums x values and vice versa -- confirm the
                    # intended coordinate order of "center".
                    ySum = sum(i for i,j in objPixels)
                    xSum = sum(j for i,j in objPixels)
                    objCenter = tuple([ySum/len(objPixels), xSum/len(objPixels)])
                    mObject = {"id":objID, "size":objSize, "percentage":objPrcnt, "center":objCenter, "pixels":objPixels, "dp":detectionPoint}
                    objects.append(mObject)
                    objID += 1
    biggestObject = max(objects, key=lambda x:x["percentage"])
    for p in biggestObject["pixels"]:
        pixels[p[1]][p[0]] = GRAY
    return de_slicing(pixels), objects
def houghLines(pixels, width, height):
    """Hough line detection: quantise each pixel's gradient into a
    (theta bucket, rho) pair, vote, then recolour pixels belonging to
    frequent lines by angle bucket (RED for 0/+-180-degree buckets, BLUE
    for +-90-degree buckets, GREEN otherwise, GRAY elsewhere)."""
    newPixels = list()
    results = [[None for a in xrange(width)] for b in xrange(height)]
    combinations = dict()  # (theta_bucket, rho) -> vote count
    pixelsOr = slicing(pixels, width)
    pixels = slicing(pixels, width)
    pixels = numpy.array(pixels)
    pS = pixels.shape
    maskX = numpy.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) * 1.0/8.0
    maskY = numpy.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) * 1.0/8.0
    Gx = convolution2D(pixels, maskX)
    Gy = convolution2D(pixels, maskY)
    for y in xrange(height):
        for x in xrange(width):
            gx = Gx[y,x][0]
            gy = Gy[y,x][0]
            if(abs(gx) + abs(gy) <= 0.0):
                theta = None  # no gradient: this pixel votes for no line
            else:
                theta = atan2(gy,gx)
            if theta is not None:
                # rho measured from the image centre; theta bucketed to 18 degrees
                rho = ceil((x - width/2) * cos(theta) + (height/2 - y) * sin(theta))
                theta = int(degrees(theta))/18
                combination = ("%d"%(theta), "%d"%rho)
                results[y][x] = combination
                # only interior pixels vote
                if x > 0 and y > 0 and x < width-1 and y < height-1:
                    if combination in combinations:
                        combinations[combination] += 1
                    else:
                        combinations[combination] = 1
            else:
                results[y][x] = (None, None)
    frec = sorted(combinations, key=combinations.get, reverse=True)
    frec = frec[:int(ceil(len(combinations) * 1.0))]  # factor 1.0 keeps all
    for y in xrange(height):
        for x in xrange(width):
            (ang, rho) = results[y][x]
            if(ang, rho) in frec:
                ang = int(ang)
                if(ang == -10 or ang == 0 or ang == 10):
                    newPixels.append(RED)
                elif(ang == -5 or ang == 5):
                    newPixels.append(BLUE)
                else:
                    newPixels.append(GREEN)
            else:
                newPixels.append(GRAY)
    return newPixels
def areCircles(radius, centers, borders):
    """Keep the candidate centres for which enough sampled points on the
    circle of `radius` land on (or within one pixel of) a border pixel."""
    sensibility = 25  # minimum matching samples out of the 41 taken
    newCenters = list()
    angles = [round(radians(a),2) for a in range(0,361,9)]  # every 9 degrees
    for center in centers:
        count = 0
        for theta in angles:
            x = int(radius * cos(theta)) + center[0]
            y = int(radius * sin(theta)) + center[1]
            # tolerate one pixel of jitter around the sampled point
            s = (x,y)
            s1 = (x,y+1)
            s2 = (x+1,y)
            s3 = (x+1,y+1)
            s4 = (x-1,y-1)
            s5 = (x,y-1)
            s6 = (x-1,y)
            if s in borders or s1 in borders or s2 in borders or s3 in borders or s4 in borders or s5 in borders or s6 in borders:
                count = count + 1
        if(count >= sensibility):
            print "[!] Possible circle found ",center
            newCenters.append(center)
    return newCenters
def groupCircles(centers):
    """Merge centres lying within rThreshold of a running anchor into one
    averaged centre.  Expects a sorted, non-empty `centers` list.

    NOTE(review): the anchor is re-read as centers[a] (the current element,
    not the next) and the last cluster is flushed only via the a == len-1
    branch -- cluster-boundary behaviour looks fragile; confirm before reuse.
    """
    newCenters = list()
    mainCenter = list()  # centres accumulated for the current cluster
    rThreshold = 5
    x, y = centers[0]    # cluster anchor
    for a, center in enumerate(centers):
        if(abs(center[0]-x) <= rThreshold and abs(center[1]-y) <= rThreshold):
            mainCenter.append(center)
        else:
            # close the current cluster with its average, start a new one
            newCenter = (sum([ce[0] for ce in mainCenter])/len(mainCenter),\
                         sum([ce[1] for ce in mainCenter])/len(mainCenter))
            mainCenter = [center]
            newCenters.append(newCenter)
            #print "[!] Possible circle center found ", newCenter
            try: x, y = centers[a]
            except IndexError: return newCenters
        if(a == len(centers)-1):
            # flush the final cluster
            newCenter = (sum([ce[0] for ce in mainCenter])/len(mainCenter),\
                         sum([ce[1] for ce in mainCenter])/len(mainCenter))
            newCenters.append(newCenter)
            #print "[!] Possible circle center found ", newCenter
            try: x, y = centers[a]
            except IndexError: return newCenters
    return newCenters
def groupRadius(circles):
    """Merge circle candidates whose radii differ by at most rThreshold.

    `circles` maps str(radius) -> list of centres; clusters of nearby radii
    are replaced by one entry keyed by the mean radius, with their centres
    merged through groupCircles().
    """
    newCircles = dict()
    allRadius = sorted([int(r) for r in circles])
    rThreshold = 3
    pRadius = allRadius[0]       # anchor radius of the current cluster
    kRadius = str(pRadius)
    nCircles = circles[kRadius]  # centres accumulated for the cluster
    nRadius = [pRadius]          # radii accumulated for the cluster
    for a, radius in enumerate(allRadius):
        kRadius = str(radius)
        if(abs(radius-pRadius) <= rThreshold):
            nRadius.append(radius)
            nCircles += circles[kRadius]
        else:
            # close the cluster: merged centres under the mean radius
            nCircles = groupCircles(sorted(nCircles))
            nRadius = sum(nRadius)/len(nRadius)
            newCircles[str(nRadius)] = nCircles
            try:
                pRadius = allRadius[a]
                kRadius = str(pRadius)
                nCircles = circles[kRadius]
                nRadius = [pRadius]
            except IndexError:
                return newCircles
        if(a == len(allRadius)-1):
            # flush the final cluster
            nCircles = groupCircles(sorted(nCircles))
            nRadius = sum(nRadius)/len(nRadius)
            newCircles[str(nRadius)] = nCircles
            try:
                pRadius = allRadius[a]
                kRadius = str(pRadius)
                nCircles = circles[kRadius]
                nRadius = [pRadius]
            except IndexError:
                return newCircles
    return newCircles
def groupVotes(votes, width, height):
    """Agglomerate nearby Hough votes, then return the cells whose count
    exceeds a threshold derived from the maximum and mean vote.

    Repeatedly transfers weaker neighbouring votes (within a growing window)
    into the stronger cell until no transfer happens.  `votes` is mutated.
    """
    for threshold in xrange(1, int(round(width * 0.1)),2):
        agregado = True
        while agregado:
            agregado = False
            for y in xrange(height):
                for x in xrange(width):
                    v = votes[y][x]
                    if v > 1:
                        for dx in xrange(-threshold, threshold):
                            for dy in xrange(-threshold, threshold):
                                if not (dx == 0 and dy == 0):
                                    if y + dy >= 0 and y + dy < height and x + dx >= 0 and x + dx < width:
                                        w = votes[y + dy][x + dx]
                                        if w > 0:
                                            if v - threshold >= w:
                                                # absorb the weaker neighbour
                                                votes[y][x] = v + w
                                                votes[y + dy][x + dx] = 0
                                                agregado = True
    vMax = max(max(v) for v in votes)
    vSum = sum(sum(v) for v in votes)
    vAverage = vSum / (width * height)
    threshold = (vMax + vAverage) / 2.0  # midpoint of max and mean
    groupedVotes = [(x,y) for x in xrange(width) for y in xrange(height) if votes[y][x] > threshold]
    return groupedVotes
def houghCircles(pixels, Gx, Gy, width, height, radius):
    """Gradient Hough transform for a fixed radius: each WHITE edge pixel
    votes for the centre lying `radius` along its gradient direction.

    Coordinates are converted between image axes (top-left origin) and
    centred maths axes (y up).  Returns grouped candidate centres.
    """
    votes = [[0 for a in xrange(width)] for b in xrange(height)]
    for ym in xrange(height):
        y = height /2- ym  # maths y: origin at image centre, up positive
        for xm in xrange(width):
            if(pixels[ym][xm] == WHITE):
                x = xm - width / 2
                gx = Gx[ym,xm][0]
                gy = Gy[ym,xm][0]
                g = sqrt(gx**2 + gy**2)
                if(abs(g) > 0.0):
                    cosTheta = gx / g  # unit gradient direction
                    sinTheta = gy / g
                    xc = int(round(x - radius * cosTheta))
                    yc = int(round(y - radius * sinTheta))
                    xcm = xc + width / 2   # back to image coordinates
                    ycm = height / 2 - yc
                    if xcm >= 0 and xcm < width and ycm >= 0 and ycm < height:
                        votes[ycm][xcm] += 1
    newPixels = groupVotes(votes, width, height)
    return groupCircles(newPixels)
def circleDetection(pixels, width, height, radius=0):
    """Detect circles over a range of radii (or one fixed radius) using the
    gradient Hough transform plus an on-circle border check.

    With radius == 0, radii from 1% to 30% of the image diagonal are scanned
    and candidates are validated against the WHITE object borders; otherwise
    only the given radius is voted on.  Returns {str(radius): [centres]}.
    """
    circles = dict()
    lPixels = pixels  # keep the flat list for objectClassification
    pixelsOr = slicing(pixels, width)
    pixels = slicing(pixels, width)
    pixels = numpy.array(pixels)
    pS = pixels.shape
    n = 1.0/1.0
    maskX = numpy.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) * n
    maskY = numpy.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) * n
    Gx = convolution2D(pixels, maskX)
    Gy = convolution2D(pixels, maskY)
    maxDiag = int(floor(sqrt(width**2 + height**2)))
    if(radius == 0):
        maxRadius = int(ceil(maxDiag * 0.3))
        minRadius = int(ceil(maxDiag * 0.01))
        borders = list()
        # collect every white-object pixel as the border set
        p, objects = objectClassification(lPixels, width, height, color=WHITE)
        for o in objects:
            borders+=o["pixels"]
        print "[O] Circle detection global advance >> %s%%"%(str(percentage(maxRadius,0)))
        for r in range(minRadius, maxRadius):
            lastCenters = houghCircles(pixelsOr, Gx, Gy, width, height, r)
            if(len(lastCenters) > 0):
                lastCircles = areCircles(r, lastCenters, borders)
                if(len(lastCircles) > 0):
                    circles[str(r)] = lastCircles
            print "[O] Circle detection global advance [r=%d] >> %s%%"%(r, str(percentage(maxRadius,r)))
        circles = groupRadius(circles)
    else:
        circles[str(radius)] = houghCircles(pixelsOr, Gx, Gy, width, height, radius)
    return circles
def calcTheta(gx,gy):
    """Return the direction of gradient (gx, gy) normalised into [0, 2*pi)
    and mapped by (theta - pi/2) * -1, or None for a zero gradient.

    BUGFIX: the original fell through to the normalisation loops with
    theta = None, which crashed on None + float (under Python 2, None
    compares smaller than any number, so the loop body ran).  Callers in
    this file already test `is not None`, so a zero gradient now returns
    None immediately.
    """
    if(abs(gx) + abs(gy) <= 0.0):
        return None
    theta = atan2(gy,gx)
    while(theta < 0.0): theta += 2*pi
    while(theta > 2*pi): theta -= 2*pi
    theta = (theta - pi/2) * -1
    return theta
def calcTangentLine(x, y, theta, l):
    """Return the two endpoints at distance l from (x, y) along direction
    theta and along the opposite direction (theta - pi)."""
    forward = (x + l * cos(theta), y + l * sin(theta))
    backward = (x + l * cos(theta - pi), y + l * sin(theta - pi))
    return forward, backward
def itIntersect(line1, line2):
    """Test whether two segments (dicts with 'start'/'end' points) intersect.

    Returns (flag, point); point is (0, 0) when flag is False.

    BUGFIX: the slope computations divided by zero for a vertical segment
    (equal start/end x); such segments now report no intersection instead
    of raising ZeroDivisionError.
    """
    intersection = (0,0)
    (x11,y11), (x12,y12) = line1["start"], line1["end"]
    (x21,y21), (x22,y22) = line2["start"], line2["end"]
    if(max(x11,x12) < min(x21,x22)):
        # the x-ranges do not overlap at all
        return False, intersection
    if(x11 == x12 or x21 == x22):
        # vertical segment: slope undefined, treat as no intersection
        return False, intersection
    a1 = (y11-y12)/(x11-x12)  # slopes
    a2 = (y21-y22)/(x21-x22)
    if(a1 == a2):
        # parallel lines never intersect
        return False, intersection
    b1 = y11 - a1 * x11
    b2 = y21 - a2 * x21
    xA = (b2-b1)/(a1-a2)  # x of the infinite-line intersection
    if(xA < max(min(x11,x12),min(x21,x22)) or xA > min(max(x11,x12),max(x21,x22))):
        # intersection lies outside the overlap of the two segments
        return False, intersection
    yA = a1 * xA + b1
    return True, (xA, yA)
def calcEllipse(c, pixels):
    """Build an ellipse descriptor around centre c[0] from an object's
    pixels: the nearest/farthest pixels define the minor/major diameters,
    their mirrored endpoints, and the bounding box.

    NOTE(review): calcTheta is fed raw pixel coordinates here rather than
    gradient components -- confirm the stored angles are meaningful.
    """
    center = c[0]
    minD, maxD = maxint, 0  # running nearest/farthest distances
    ellipse = {"center":center}
    for pixel in pixels:
        d = sqrt((center[0]-pixel[0])**2 + (center[1]-pixel[1])**2)
        if(d < minD):
            minD = d
            ellipse["minD"] = pixel
            ellipse["minSemiD"] = d*2
            ellipse["minTheta"] = calcTheta(pixel[0], pixel[1])
        if(d > maxD):
            maxD = d
            ellipse["maxD"] = pixel
            ellipse["maxSemiD"] = d*2
            ellipse["maxTheta"] = calcTheta(pixel[0], pixel[1])
    # mirror each extreme pixel through the centre to get a full diameter
    dx, dy = (center[0] - ellipse["minD"][0]), (center[1] - ellipse["minD"][1])
    ellipse["minD"] = (ellipse["minD"], (center[0]+(1*dx),center[1]+(1*dy)))
    dx, dy = (center[0] - ellipse["maxD"][0]), (center[1] - ellipse["maxD"][1])
    ellipse["maxD"] = (ellipse["maxD"], (center[0]+(1*dx),center[1]+(1*dy)))
    b = ellipse["minD"] + ellipse["maxD"]  # the four diameter endpoints
    minX, minY = min(p[0] for p in b), min(p[1] for p in b)
    maxX, maxY = max(p[0] for p in b), max(p[1] for p in b)
    ellipse["box"] = (minX, minY, maxX, maxY)
    return ellipse
def houghEllipses(pixels, borders, Gx, Gy, width, height):
    """Vote for ellipse centres from pairs of border points.

    For each admissible pair (p1, p2) the tangent lines at both points are
    intersected at T; the ray from T through the pair's midpoint M passes
    through the ellipse centre, so votes are cast along it until a border
    pixel or the border bounding box stops the walk.

    Returns (lines, points, origins, votes) where `votes` is the grouped
    high-vote cell list from groupVotes(); the rest aid visualisation.
    """
    minX, minY = min(p[0] for p in borders), min(p[1] for p in borders)
    maxX, maxY = max(p[0] for p in borders), max(p[1] for p in borders)
    maxDiag = int(floor(sqrt(width**2 + height**2)))
    threshold = 20  # minimum x and y separation between paired points
    results = [[0 for a in xrange(width)] for b in xrange(height)]
    lines, origins, points = list(), list(), list()
    for a, p1 in enumerate(borders):
        x1, y1 = p1[0], p1[1]
        gx1 = Gx[y1,x1][0]
        gy1 = Gy[y1,x1][0]
        theta1 = calcTheta(gx1, gy1)  # tangent direction at p1
        for b, p2 in enumerate(borders):
            dx = abs(p1[0]-p2[0])
            dy = abs(p1[1]-p2[1])
            # only well-separated pairs, and every second point, vote
            if(p1 != p2 and dx > threshold and dy > threshold and b%2 == 0):
                x2, y2 = p2[0], p2[1]
                gx2 = Gx[y2,x2][0]
                gy2 = Gy[y2,x2][0]
                theta2 = calcTheta(gx2, gy2)
                if(theta1 is not None and theta2 is not None):
                    line1 = {"origin":(x1,y1), "theta":theta1}
                    line2 = {"origin":(x2,y2), "theta":theta2}
                    line1["start"], line1["end"] = calcTangentLine(x1, y1, theta1, maxDiag)
                    line2["start"], line2["end"] = calcTangentLine(x2, y2, theta2, maxDiag)
                    flag, T = itIntersect(line1, line2)
                    if(flag):
                        M = ((x1+x2)/2, (y1+y2)/2)  # midpoint of the pair
                        xT, yT = T
                        xM, yM = M
                        if(xT != xM and yT != yM):
                            p = list()
                            dx, dy = (xM - xT), (yM - yT)
                            mD = min(dx, dy)
                            dx, dy = dx/mD, dy/mD  # step direction along T -> M
                            #print "dx = %f, dy = %f, mD = %f"%(dx,dy,mD)
                            m = dy/dx
                            theta3 = tan(dy/dx)
                            line3 = {"origin":T, "start":T, "end":M, "theta":theta3}
                            line4 = {"origin":M, "start":M, "theta":theta3}
                            line4["end"] = (M[0] - 10, (m * ((M[0]-10) - xM)) + yM)
                            line4["end"] = (M[0]+(-100*dx),M[1]+(-100*dy))
                            # walk from M away from T, voting until a border
                            # pixel or the bounding box stops the ray
                            for c in range(maxDiag):
                                #xV = xT#xM + c
                                #yV = yT#(m * (xV - xM)) + yM
                                xV, yV = int(ceil(M[0]+(-c*dx))), int(ceil(M[1]+(-c*dy)))
                                if((xV,yV) in borders or xV <= minX or yV <= minY or xV >= maxX or yV >= maxY):
                                    break
                                else:
                                    p.append((xV, yV))
                                    results[yV][xV] += 1
                            lines.append((line1["start"], line1["end"]))
                            lines.append((line2["start"], line2["end"]))
                            lines.append((line3["start"], line3["end"]))
                            lines.append((line4["start"], line4["end"]))
                            points.append(p)
                            origins.append(line1["origin"])
                            origins.append(line2["origin"])
                            origins.append(line3["origin"])
                            origins.append(line4["origin"])
                        else:
                            continue
                    else:
                        continue
                else:
                    continue
            else:
                continue
        print "[O] Ellipse detection partial advance >> %s%%"%(str(percentage(len(borders),a)))
    votes = groupVotes(results, width, height)
    return lines, points, origins, votes
def houghEllipses2(pixels, borders, Gx, Gy, width, height):
    """Randomised variant of houghEllipses: votes come from 1000 random
    border-point pairs instead of all pairs, and the voting ray is limited
    to a sixth of the image diagonal.

    NOTE(review): the progress message divides by 5000 although the loop
    runs 1000 iterations, and `threshold`, `b1` and `b2` are computed but
    never used -- confirm before relying on this variant.
    """
    minX, minY = min(p[0] for p in borders), min(p[1] for p in borders)
    maxX, maxY = max(p[0] for p in borders), max(p[1] for p in borders)
    maxDiag = int(floor(sqrt(width**2 + height**2)))
    threshold = int(ceil(len(borders)*0.15))
    results = [[0 for a in xrange(width)] for b in xrange(height)]
    lines, origins, points = list(), list(), list()
    b1 = sorted(borders, key=lambda x: x[1])
    b2 = sorted(borders, key=lambda x: x[1], reverse=True)
    #b1 = b[:len(b)/2]
    #b2 = b[len(b)/2:]
    for a in range(1000):
        p1, p2 = choice(borders), choice(borders)  # random border pair
        x1, y1 = p1[0], p1[1]
        x2, y2 = p2[0], p2[1]
        gx1 = Gx[y1,x1][0]
        gy1 = Gy[y1,x1][0]
        gx2 = Gx[y2,x2][0]
        gy2 = Gy[y2,x2][0]
        theta1 = calcTheta(gx1, gy1)
        theta2 = calcTheta(gx2, gy2)
        if(theta1 is not None and theta2 is not None):
            line1 = {"origin":(x1,y1), "theta":theta1}
            line2 = {"origin":(x2,y2), "theta":theta2}
            line1["start"], line1["end"] = calcTangentLine(x1, y1, theta1, maxDiag)
            line2["start"], line2["end"] = calcTangentLine(x2, y2, theta2, maxDiag)
            flag, T = itIntersect(line1, line2)
            if(flag):
                M = ((x1+x2)/2, (y1+y2)/2)  # midpoint of the pair
                xT, yT = T
                xM, yM = M
                if(xT != xM and yT != yM):
                    p = list()
                    dx, dy = (xM - xT), (yM - yT)
                    mD = min(dx, dy)
                    dx, dy = dx/mD, dy/mD  # step direction along T -> M
                    #print "dx = %f, dy = %f, mD = %f"%(dx,dy,mD)
                    m = dy/dx
                    theta3 = tan(dy/dx)
                    line3 = {"origin":T, "start":T, "end":M, "theta":theta3}
                    line4 = {"origin":M, "start":M, "theta":theta3}
                    line4["end"] = (M[0] - 10, (m * ((M[0]-10) - xM)) + yM)
                    line4["end"] = (M[0]+(-100*dx),M[1]+(-100*dy))
                    # walk from M away from T, voting until stopped
                    for c in range(maxDiag/6):
                        #xV = xT#xM + c
                        #yV = yT#(m * (xV - xM)) + yM
                        xV, yV = int(ceil(M[0]+(-c*dx))), int(ceil(M[1]+(-c*dy)))
                        if((xV,yV) in borders or xV <= minX or yV <= minY or xV >= maxX or yV >= maxY):
                            break
                        else:
                            p.append((xV, yV))
                            results[yV][xV] += 1
                    lines.append((line1["start"], line1["end"]))
                    lines.append((line2["start"], line2["end"]))
                    lines.append((line3["start"], line3["end"]))
                    lines.append((line4["start"], line4["end"]))
                    points.append(p)
                    origins.append(line1["origin"])
                    origins.append(line2["origin"])
                    origins.append(line3["origin"])
                    origins.append(line4["origin"])
                else:
                    continue
            else:
                continue
        else:
            continue
        print "[O] Ellipse detection partial advance >> %s%%"%(str(percentage(5000,a)))
    votes = groupVotes(results, width, height)
    return lines, points, origins, votes
def ellipseDetection(pixels, width, height):
    """Full ellipse-detection pipeline: classify the WHITE objects, vote for
    each object's centre with houghEllipses on its convex hull, build
    ellipse descriptors, then flood-fill each detected ellipse to measure
    its size."""
    lines, points, origins, ellipses = list(), list(), list(), list()
    p, objects = objectClassification(pixels, width, height, color=WHITE)
    pixelsOr = slicing(pixels, width)
    pixels = slicing(pixels, width)
    pixels = numpy.array(pixels)
    pS = pixels.shape
    n = 1.0/1.0
    maskX = numpy.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) * n
    maskY = numpy.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]) * n
    Gx = convolution2D(pixels, maskX)
    Gy = convolution2D(pixels, maskY)
    for a, ob in enumerate(objects):
        print "%d : %d"%(len(objects), a+1)
        pix = graham_scan(ob["pixels"])  # convex hull of the object's pixels
        l, p, o, v = houghEllipses(pixelsOr, pix, Gx, Gy, width, height)
        lines += l
        points += p
        origins += o
        ellipse = calcEllipse(v, ob["pixels"])
        ellipses.append(ellipse)
    visited = [[0 for b in xrange(width)] for a in xrange(height)]
    objID = 1
    for ellipse in ellipses:
        # flood-fill from the detected centre to measure the ellipse region
        center = ellipse["center"]
        objColor = (randint(0,255), randint(0,255), randint(0,255))
        pixels, visited, objPixels = bfs(pixelsOr, visited, center, objColor, width, height)
        objSize = len(objPixels)
        objPrcnt = percentage(width*height, objSize)
        ellipse["size"], ellipse["percentage"], ellipse["pixels"] = objSize, objPrcnt, objPixels
    return de_slicing(pixels), lines, points, origins, ellipses
|
from load_data import load_X_images, load_Y_labels

# Load the MNIST training images and labels from the local gzipped IDX files.
x_train = load_X_images('/Users/sunyambagga/Kaggle/Digit_Recognizer/train-images-idx3-ubyte.gz')
y_train = load_Y_labels('/Users/sunyambagga/Kaggle/Digit_Recognizer/train-labels-idx1-ubyte.gz')
#print x_train
#print y_train
# Using Theano and Lasagne
import lasagne
import theano
import theano.tensor as T
def creat_neural_network(input_var=None):
    """Build a 784-800-800-10 multilayer perceptron for MNIST digits.

    Input shape is (batch, 1, 28, 28); both hidden layers use ReLU with
    Glorot-uniform weights, and the output layer is a 10-way softmax.
    Returns the lasagne output layer.
    """
    network = lasagne.layers.InputLayer(shape=(None,1,28,28), input_var=input_var)
    # Dropout: 20% on the inputs, 50% after each hidden layer, to avoid overfitting
    network = lasagne.layers.DropoutLayer(network, p=0.2)
    network = lasagne.layers.DenseLayer(network, num_units=800, nonlinearity=lasagne.nonlinearities.rectify, W=lasagne.init.GlorotUniform())
    network = lasagne.layers.DropoutLayer(network, p=0.5)
    network = lasagne.layers.DenseLayer(network, num_units=800, nonlinearity=lasagne.nonlinearities.rectify, W=lasagne.init.GlorotUniform())
    network = lasagne.layers.DropoutLayer(network, p=0.5)
    network = lasagne.layers.DenseLayer(network, num_units=10, nonlinearity=lasagne.nonlinearities.softmax)
    return network
#creat_neural_network()
# Symbolic inputs: image batches and integer class targets.
input_var = T.tensor4('input')
target_var = T.ivector('target')
nn = creat_neural_network(input_var)
# Error Function
prediction = lasagne.layers.get_output(nn)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# Update weights
parameters = lasagne.layers.get_all_params(nn, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, parameters, learning_rate=0.15, momentum=0.9)
# Creating a theano function for a single training step
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# TRAINING THE NEURAL NET
# Note: I trained it for 200 epochs; can take a few hours
# NOTE(review): each "iteration" is one update over the whole training set.
num_iterations = 10
for i in range(num_iterations):
    err = train_fn(x_train, y_train)
    print "Current iteration" + str(i)
# Making predictions
# deterministic=True disables the dropout layers at inference time
test_prediction = lasagne.layers.get_output(nn, deterministic=True)
val_fn = theano.function([input_var], T.argmax(test_prediction, axis=1)[0])
# Download Kaggle Test Data: "test_MNIST.csv"
import csv
import numpy as np
X_test = []
data = []
with open("/Users/sunyambagga/Kaggle/Digit_Recognizer/test_MNIST.csv") as file:
    lineReader = csv.reader(file, delimiter=',', quotechar="\"")
    lineNum = 1
    for row in lineReader:
        # skip the header row; lineNum only ever holds 1 (header) or 9 (data)
        if lineNum == 1:
            lineNum = 9
        else:
            data.append(row)
data = np.array(data, dtype='f')
data = data/np.float32(256)  # scale pixel values into [0, 1)
X_test = data.reshape(-1, 1, 28, 28)
# Writing results to csv
# NOTE(review): results.csv is reopened in append mode for every row --
# consider opening it once outside the loop.
for i in range(len(X_test)):
    with open('results.csv', 'a') as f:
        # Just to see progress
        if i%1000==0:
            print "Writing File", i
        f.write(str(i+1) + ',' + '"' + str(val_fn([X_test[i]])) + '"' + '\n')
# For Kaggle Submission, include "ImageId","Label" as the first row
# NOTE: With the current results.csv, you will get a 83% accuracy; Try out different parameters, specially num_iterations=200 to get more 99+% accuracy
def insert(head, data):
    """Prepend a new Node holding `data` and return it as the new head."""
    node = Node(data)
    node.next = head
    return node
|
'''
Plot room and hotel query results. This is intended to be run in
adbs-reservewithus/ReserveWithUsApp/benchmark_data.
'''
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
import benchmark_queries as bq
import math
import numpy as np
GRID_SIZE = 5.5  # side length (inches) of each subplot cell

# Get CSVs.
# Each entry: (figure title, ((subplot title, csv filename), ...)).
raw_data = (
    ('Distribution of hotel query times.',
     (
      ('NO OPTIMIZATION',
       'hotel_times_no_opt.csv'),
      ('index on HOTEL.(COUNTRY,CITY)',
       'hotel_times_hotel_index.csv'),
      ('indexes on\n'
       'HOTEL.COUNTRY+CITY, ROOM_DATE.SINGLE_DAY_DATE',
       'hotel_times_hotel_room_date_indexes.csv'),
     )
    ),
    ('Distribution of room query times.',
     (
      ('NO OPTIMIZATION',
       'room_times_no_opt.csv'),
      ('index on HOTEL.COUNTRY+CITY',
       'room_times_hotel_index.csv'),
      ('indexes on\n'
       'HOTEL.COUNTRY+CITY, ROOM_DATE.SINGLE_DAY_DATE',
       'room_times_hotel_room_date_indexes.csv'),
      ('inverted query',
       'room_times_inside_out_only.csv'),
      ('inverted query and indexes on\n'
       'HOTEL.COUNTRY+CITY, ROOM_DATE.SINGLE_DAY_DATE',
       'room_times_new_query_indexes_all.csv'),
     )
    )
)
# Render one histogram figure per query group and save it as <index>.png.
for plot_idx, (plot_title, plot_grp) in enumerate(raw_data):
    # Load each CSV, dropping the first 5 warm-up samples.
    grp_data = [(title, bq.load_times_from_csv(filename)[5:], filename)
                for title, filename in plot_grp]
    max_data = max(max(data) for _, data, _ in grp_data)
    bins_max = int(math.ceil(max_data * 100.0)) / 100.0  # round up to 0.01 s
    # Lay the subplots out on a near-square grid.
    num_axes = len(plot_grp)
    num_cols = int(math.ceil(math.sqrt(num_axes)))
    num_rows = int((num_axes + num_cols - 1.0) / num_cols)
    f = plt.figure(figsize=(GRID_SIZE * num_cols, GRID_SIZE * num_rows))
    f.suptitle(plot_title, fontsize=12)
    for idx, (title, data, filename) in enumerate(grp_data):
        # BUGFIX: add_subplot indices are 1-based; passing 0 raises an error
        # (and shifted every panel by one position).
        ax = f.add_subplot(num_rows, num_cols, idx + 1)
        ax.hist(data, bins=np.linspace(0., bins_max, num=101))
        ax.set_title(title, fontsize=8)
        ax.set_ylabel('freq')
        ax.set_xlabel('time (s)')
    plt.savefig('{}.png'.format(plot_idx))
    plt.close(f)  # free the figure so memory does not grow across groups
|
from django.db import models
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView, FormView, ListView
from core.models import Incident, State
from core import forms
class HomePageView(TemplateView):
    """Site landing page, showing the three most recent incidents."""
    template_name = 'core/home.html'
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['recent_incidents'] = Incident.objects.all()[:3]
        return ctx
@method_decorator(login_required, name='dispatch')
class ReportPageView(FormView):
    """Login-protected incident report form; also lists the two newest incidents."""
    template_name = 'core/reports.html'
    form_class = forms.ReportForm
    context_object_name = 'form'
    success_url = '/'
    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['incidents'] = Incident.objects.order_by('-date_uploaded')[:2]
        return ctx
    def form_valid(self, form):
        """Attach the current user to the incident, then save it."""
        incident = form.save(commit=False)
        incident.user = self.request.user
        incident.save()
        return super().form_valid(form)
class CitiesView(ListView):
    """Paginated list of states that have at least one related incident."""
    model = State
    paginate_by = 10
    context_object_name = "states"
    template_name = "core/cities.html"
    def get_queryset(self):
        # Only states with one or more incidents, de-duplicated.
        states_with_incidents = State.objects.filter(incidents__gte=1)
        return states_with_incidents.distinct()
class NewFeedView(ListView):
    """Paginated news feed of incidents, 30 per page."""
    model = Incident
    paginate_by = 30
    template_name = 'core/news.html'
    context_object_name = 'news'
def error_404(request, exception):
    """Render the custom 404 page."""
    return render(request, 'core/404.html', {})
def error_500(request):
    """Render the custom 500 page."""
    return render(request, 'core/500.html', {})
# Generated by Django 3.2.7 on 2021-10-06 19:21
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 3.2.7): make JoinUs.data optional (blank=True).
    # Note: max_length on a TextField is enforced by forms only, not the DB.
    dependencies = [
        ('lab', '0005_joinus'),
    ]
    operations = [
        migrations.AlterField(
            model_name='joinus',
            name='data',
            field=models.TextField(blank=True, max_length=1000),
        ),
    ]
|
# -*-coding:Utf-8 -*
from getpass import getpass
import hashlib

# Read the password without echoing it to the terminal.
mdp = getpass("Tappez mdp :")
# Bug fix: the format string had no {} placeholder, so the entered value
# was never actually printed.
print("Je l'imprime {}".format(mdp))

# HASH
print(hashlib.algorithms_guaranteed)
# sha1 only accepts byte strings, not str
chaine_bits = b"mdp"
print(chaine_bits)
mdp = hashlib.sha1(b"mdp")
print(mdp.hexdigest())
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2019-01-27 04:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11.1): add optional many-to-many
    # Idea.categories -> Category, reachable from Category as 'ideas'.
    dependencies = [
        ('myapp1', '0002_auto_20190127_0741'),
    ]
    operations = [
        migrations.AddField(
            model_name='idea',
            name='categories',
            field=models.ManyToManyField(blank=True, related_name='ideas', to='myapp1.Category', verbose_name='Categories'),
        ),
    ]
|
from flask_admin.contrib.sqla import ModelView
from database.models import Order
from server.model_views.mixins.AuthMixin import AuthMixin
class OrderModelView(AuthMixin, ModelView):
    """Admin view for Order rows; hides the raw product_id column from forms."""
    # Bug fix: ('product_id') is just a parenthesized string, which would be
    # iterated character by character; a one-element tuple needs the comma.
    form_excluded_columns = ('product_id',)
    def __init__(self, session, **kwargs):
        # Bind this view to the Order model explicitly.
        super(OrderModelView, self).__init__(Order, session, **kwargs)
|
# Generated by Django 2.2.4 on 2019-09-20 02:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 2.2.4): add WhiskyInfo.rating and alter slug.
    dependencies = [
        ('whiskydatabase', '0030_auto_20190916_1631'),
    ]
    operations = [
        migrations.AddField(
            model_name='whiskyinfo',
            name='rating',
            field=models.FloatField(default=0),
        ),
        migrations.AlterField(
            model_name='whiskyinfo',
            name='slug',
            # NOTE(review): IntegerField ignores max_length, and default='' is
            # not a valid integer — this looks like it was meant to be a
            # CharField/SlugField. Changing a committed migration would break
            # migration history, so only flagging it here.
            field=models.IntegerField(default='', editable=False, max_length=200),
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-01 17:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.10): constrain Robot.type to a one-letter
    # choice (V=vrep, S=poppy-simu, R=real robot), defaulting to real robot.
    dependencies = [
        ('app1', '0024_auto_20170127_1212'),
    ]
    operations = [
        migrations.AlterField(
            model_name='robot',
            name='type',
            field=models.CharField(choices=[('V', 'vrep'), ('S', 'poppy-simu'), ('R', 'real robot')], default='R', max_length=1),
        ),
    ]
|
from markovmodel import MarkovModel
from tokenize import tokenize
import pickle
import sys
if __name__ == "__main__":
    # set to 100,000 to avoid max recursion depth exceeded error
    sys.setrecursionlimit(100000)
    # corpus version number -> lookback window sizes to train for it
    mapping = {
        1: [1, 2, 3, 4, 5, 6, 7],
        2: [2, 3, 4, 5],
        3: [2, 3],
        4: [1, 2, 3, 4, 5, 6],
        5: [1, 2, 3, 4]
    }
    # c = Corpus version number
    # n = lookback
    for c, lookbacks in mapping.items():
        for n in lookbacks:
            words = tokenize("corpus/clean_corpus_{}.txt".format(c))
            # Train the model and pickle it. Opening with 'wb' already
            # truncates the file, so no separate "clear" step is needed.
            with open("models/trained_model{}_{}.p".format(c, n), "wb") \
                    as file:
                model = MarkovModel()
                model.train(words, n)
                pickle.dump(model, file)
|
import tensorflow as tf
"""
理解:
session对象可调用"算子"(将算子作为参数传入), 并返回计算结果
"""
# (Translation of the note above: a Session object can execute an "op"
# passed in as an argument and returns the computed result.)
# TF1-style graph demo: multiply a 1x2 constant by a 2x1 constant and run
# the product op in a Session (result should be [[12]]).
matrix1 = tf.constant([[3, 3]])
print(matrix1)
matrix2 = tf.constant([[2], [2]])
print(matrix2)
product = tf.matmul(matrix1, matrix2)
# method 1: explicit Session that must be closed manually
sess = tf.Session()
result1 = sess.run(product)
print(result1)
sess.close()
# method 2: context manager closes the Session automatically
with tf.Session() as sess:
    result2 = sess.run(product)
    print(result2)
import sys
import json

# Parse a flattened Verilog netlist (sys.argv[1]) and emit one JSON record
# per module (sys.argv[2]): each port name mapped to 'input'/'output', plus
# a 'seq' flag marking sequential (clocked) modules.
module_dic, tmp, seq = {}, {}, 0
with open(sys.argv[1], 'r') as f:
    for rawline in f:
        line = rawline.strip(' ').strip('\n')
        if line.startswith('module'):
            module_name = line.split(' ')[1]
        elif line.startswith('endmodule'):
            tmp['seq'] = 1 if seq == 1 else 0
            seq = 0
            # These two cells are always treated as sequential.
            if module_name == "GEN_CLKGATE_D1" or module_name == "GEN_SYNC3C_STRICT_D1":
                tmp['seq'] = 1
            module_dic[module_name] = tmp
            tmp = {}
        elif line.startswith('input'):
            # [:-1] drops the trailing character of the declaration
            # (assumed to be ';' — TODO confirm against the netlist format).
            port = line.split(' ')[-1][:-1]
            tmp[port] = 'input'
            if port == 'clk':
                seq = 1
        elif line.startswith('output'):
            tmp[line.split(' ')[-1][:-1]] = 'output'
        elif line.startswith('udp_dff') or line.startswith('udp_tlat'):
            # Flip-flop / latch primitives imply a sequential module.
            seq = 1
# Bug fix: the output file handle was previously never closed
# (json.dump(..., open(...))); use a context manager instead.
with open(sys.argv[2], 'w') as out:
    json.dump(module_dic, out)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 13 20:48:13 2020
"""
import tkinter as tk
from math import log
from statistics import mean
import time
sample_size = 10
width = 460
def sec_to_hms(seconds):
    """Convert a whole number of seconds to an 'H:MM:SS' string.

    Minutes and seconds are zero-padded, so 3661 -> '1:01:01' rather than
    the ambiguous '1:1:1' the previous format produced.
    """
    s = seconds % 60
    m = (seconds//60) % 60
    h = seconds // 60 // 60
    return "{}:{:02d}:{:02d}".format(h, m, s)
class Graph(tk.Canvas):
    """Scrolling actions-per-minute (APM) graph drawn on a tk.Canvas.

    Records timestamped user actions, computes a rolling APM over the last
    `sample_size` seconds, and redraws itself `refresh_rate` times per
    second while active. States: "inactive" -> "active" <-> "paused".
    """
    def __init__(self, master, width, height, max_x=10, max_y=100, bg='white',
                 refresh_rate = 15, window_size=7):
        super().__init__(master, width=width, height=height, bg=bg,
                         highlightthickness=0)
        self.width = width
        self.height = height
        self.points = []      # points currently inside the scrolling window
        self.all_points = []  # every point since start (for the pause summary)
        self.max_x = max_x
        self.max_y = max_y
        self.min_x = 0
        self.min_y = 0
        self.refresh_rate = refresh_rate
        self.window_size = window_size
        self.actions = []  # action timestamps, relative and pause-adjusted
        self.start_time = 0
        self.paused_time = 0 # time interval of pause
        self.pause_time = 0 # time of pause start
        self.keypresses = 0
        self.clicks = 0
        self.active = "inactive"
        self.cur_apm = 0
        self.max_apm = 0
        self.avg_apm = 0
        self.active_time = 0
        self.display_emb = False
        self.embedded_apm = None
    def axis(self):
        """draws the axis of the graph"""
        # tick spacing is derived from the order of magnitude of each range
        ax = log(self.max_x - self.min_x, 10)
        ay = log(self.max_y - self.min_y, 10)
        div_x = int(10**(ax-1) * (self.max_x - self.min_x) // 10**ax+1)
        div_y = int(10**(ay-1) * (self.max_y - self.min_y) // 10**ay+1)
        nb_points_x = int((self.max_x - self.min_x) // div_x)
        nb_points_y = int((self.max_y - self.min_y) // div_y)
        color = "#CCCCFF" # light blue
        for i in range(1, nb_points_x):
            x = int(i * div_x / (self.max_x - self.min_x) * self.width)
            self.create_line(x, self.height, x, self.height-5, fill=color)
            t = sec_to_hms(int(self.min_x + i*div_x))
            self.create_text(x, self.height-10,
                             text="{}".format(t),
                             font='Arial 7', fill=color)
        for i in range(1, nb_points_y):
            y = int((i * div_y) / (self.max_y - self.min_y) * self.height)
            self.create_line(0, self.height - y, 5, self.height - y,
                             fill=color)
            self.create_text(7, self.height - y, text="{}".format(int(self.min_y + i*div_y)),
                             font='Arial 7', fill=color, anchor='w')
    def draw_point(self, pts, i, redraw_axis=True):
        """Draw the segment from pts[i-1] to pts[i] plus its filled area."""
        if i > 0:
            # map data coordinates to canvas pixels (y axis flipped)
            x0 = int((pts[i-1][0] - self.min_x) / (self.max_x - self.min_x) * self.width)
            y0 = int((pts[i-1][1] - self.min_y) / (self.max_y - self.min_y) * self.height)
            y0 = self.height - y0
            x1 = int((pts[i][0] - self.min_x) / (self.max_x - self.min_x) * self.width)
            y1 = int((pts[i][1] - self.min_y) / (self.max_y - self.min_y) * self.height)
            y1 = self.height - y1
            self.create_line(x0, y0, x1, y1, fill='#6666FF', width=2)
            self.create_polygon(x0, self.height,
                                x0, y0,
                                x1, y1,
                                x1, self.height, fill="#272770")
        if redraw_axis:
            self.axis()
    def add_point(self, x, y):
        """Record point (x, y) in both the windowed and full histories."""
        self.points.append((x, y))
        self.all_points.append((x, y))
    def display(self, intro=False):
        """Redraw the whole canvas; show the key-bindings help when intro."""
        self.delete("all")
        if self.points:
            # adjusting axis scales (after discarding old points)
            m = max([p[1] for p in self.points])
            if m < self.max_y//2:
                self.max_y = max(self.max_y//2, 10)
            if self.points[-1][1] > max(self.max_y - 10, 10):
                self.max_y *= 2
            self.max_x = max(self.points[-1][0], 5)
            self.min_x = max(self.max_x - self.window_size, 0)
            # discarding old points that went out of the graph window
            i = 0
            while i < len(self.points) and self.points[i][0] < self.min_x:
                i += 1
            self.points = self.points[i:]
            for i in range(len(self.points)):
                self.draw_point(self.points, i, redraw_axis=False)
        if intro:
            self.create_text(self.width//2, self.height//2,
                             text="Start : Ctrl+Enter\nReset : Ctrl+Backspace\nToggle hovering counter : Ctrl+Inser",
                             font="monospace 18",
                             anchor="c",
                             fill="#777777",
                             justify='center')
        else:
            self.axis()
    def add_action(self, action_time):
        """Record one user action at absolute epoch time action_time."""
        self.actions.append(action_time - self.start_time - self.paused_time)
    def refresh(self):
        """Periodic tick: reschedule itself, update APM stats, and redraw."""
        if self.active == "active":
            self.after(1000//self.refresh_rate, self.refresh)
            t = time.time() - self.start_time - self.paused_time
            # discarding actions older than [sample_size] seconds
            if self.actions:
                i = 0
                while i < len(self.actions) and t - self.actions[i] > sample_size:
                    i += 1
                self.actions = self.actions[i:]
            # checking inactive time (you are considered inactive if you haven't
            # done any action in the last 3 seconds)
            i = 0
            while i < len(self.actions) and t - self.actions[i] > 3:
                i += 1
            if self.actions[i:]:
                self.active_time += 1/self.refresh_rate
            self.cur_apm = 60 / sample_size * len(self.actions)
            if self.cur_apm > self.max_apm:
                self.max_apm = self.cur_apm
            # running average over all recorded points
            n = len(self.all_points)
            self.avg_apm = round((n * self.avg_apm + self.cur_apm) / (n+1), 2)
            self.add_point(t, self.cur_apm)
            self.display()
    def play(self):
        """Start from scratch, or resume from pause, then begin refreshing."""
        if self.active == "inactive":
            self.start_time = int(time.time())
        elif self.active == "paused":
            self.paused_time += time.time() - self.pause_time
        self.active = "active"
        self.refresh()
    def pause(self):
        """Freeze the clock and draw a summary of the whole session so far."""
        self.pause_time = time.time()
        self.delete("all")
        self.active = "paused"
        self.min_x = 0
        self.min_y = 0
        self.max_x = max(max([p[0] for p in self.all_points]), 10)
        self.max_y = max(max([p[1] for p in self.all_points]), 10) + 10
        # thin the point list by halving until it fits the canvas width
        p = [self.all_points[i] for i in range(len(self.all_points))]
        while len(p) > width//2:
            p = [p[i] for i in range(len(p)) if i%2 == 0]
        for i in range(len(p)):
            self.draw_point(p, i, redraw_axis=False)
        self.axis()
    def reset(self):
        """Return every counter and point list to its initial state."""
        self.active = "inactive"
        self.paused_time = 0
        self.min_x = 0
        self.max_x = 10
        self.min_y = 0
        self.max_y = 10
        self.keypresses = 0
        self.start_time = 0
        self.pause_time = 0 # time at which it pause began
        self.keypresses = 0
        self.clicks = 0
        self.cur_apm = 0
        self.max_apm = 0
        self.avg_apm = 0
        self.active_time = 0
        self.actions = []
        self.points = []
        self.all_points = []
        self.display(intro=True)
|
from django.db import models
# Create your models here.
class Technologies(models.Model):
    """A single technology entry, identified by its name."""
    techno = models.CharField(max_length = 50)
    def __str__(self):
        # Human-readable representation is just the technology name.
        return self.techno
|
import pygame
from game import Game
def main():
    """Build a 600px, 30-row, 20 FPS game and run its main loop."""
    Game(size=600, rows=30, fps=20).run()

if __name__ == "__main__":
    main()
|
# Program to multiply two matrices using nested loops
import numpy as np
import xlrd
from builtins import str
def normalize(A):
    """Return A scaled so every column sums to 1 (column-stochastic)."""
    return A / A.sum(axis=0)[np.newaxis, :]
# Read an edge list from the first sheet of 1.xlsx, build an adjacency
# matrix, column-normalize it, then square it with a manual triple loop.
workbook = xlrd.open_workbook('1.xlsx')
sheet_names = workbook.sheet_names()
sheet = workbook.sheet_by_name(sheet_names[0])
roww=0
coll=0
a=[]
# Echo every cell and collect all values to find the largest node id.
for row_idx in range(sheet.nrows):
    #print(sheet.cell(row_idx,0))
    for col_idx in range(sheet.ncols):
        cell = sheet.cell(row_idx, col_idx)
        print(str(int(cell.value))+" ",end="")
        cell.value=int(cell.value)
        coll=col_idx
        a.append(((cell.value)))
    roww=row_idx
    print()
print()
print(str(roww) + " " + str(coll))
print(a)
print(max(a))
# The matrix is sized by the largest node id seen in the sheet.
roww=max(a)
roww=int(roww)
print(roww)
coll=roww
arr=np.empty( (roww,roww) )
print(arr)
print()
print()
# NOTE(review): both branches assign 0, so this whole loop is just
# zero-filling — np.zeros((roww, roww)) would do the same directly.
for i in range(roww):
    for j in range(coll):
        if(i==j):
            arr[i][j]=0
        else :
            arr[i][j]=0
# NOTE(review): bare np.dtype('int32') creates a dtype object and discards
# it — it does not change arr's dtype.
np.dtype('int32')
print(arr)
# Mark an edge for each (col0, col1) pair; node ids are 1-based in the file.
# NOTE(review): the inner j loop re-reads the same two columns ncols times.
for i in range(sheet.nrows):
    for j in range(sheet.ncols):
        print(sheet.cell(i,0).value,end='')
        print(sheet.cell(i,1).value)
        arr[(int(sheet.cell(i,0).value)-1)][(int(sheet.cell(i,1).value)-1)]=1
        print(arr[(int(sheet.cell(i,0).value)-1)][(int(sheet.cell(i,1).value)-1)])
print()
print("hui")
print()
print(arr)
input()
# result is 3x4
arr=normalize(arr)
# NOTE(review): result is hard-coded at 300x300 — this breaks (or pads with
# zero rows) whenever the matrix size differs; np.matmul(arr, arr) would
# compute the same product correctly and faster than the triple loop.
result = np.zeros( (300,300) )
print(arr)
# iterate through rows of X
for i in range(len(arr)):
    # iterate through columns of Y
    for j in range(len(arr[0])):
        # iterate through rows of Y
        for k in range(len(arr)):
            # print("this is arr[i][k]"+str(arr[i][k]))
            #print("this is arr[k][j]"+str(arr[k][j]))
            result[i][j] += arr[i][k] * arr[k][j]
            #print("this is the result"+str(result[i][j]))
for r in result:
    print(r)
input()
|
#!/usr/bin/python3
"""
collect-psd2-trusted-certificates.py
Description:
Download all trusted psd2 service provider certificates
Usage:
collect-psd2-trusted-certificates.py -o <filename> [--verbose]
Options:
-h --help Show this screen.
-o --output <filename> File path
--verbose
"""
import base64
import json
import logging
import os
import re
import requests
from datetime import datetime
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from docopt import docopt
from lxml import etree
from typing import Dict, Set
def collect() -> Set[str]:
    """
    Gather the X509 certificates of every trusted service provider.
    :return: collection of x509 certificates.
    """
    result = set()
    for country, provider_names in _find_trusted_service_providers().items():
        result.update(_collect_certificates(country, provider_names))
    return result
def write_certs_to_file(certificates: set, path: str):
    """
    Write all passed certificates to a file, one per line.
    :param certificates: list of certificates of all trusted service providers
    :param path: path to write all certificates
    """
    # open(..., 'w') already truncates an existing file, so the previous
    # os.path.exists / os.remove dance was redundant.
    with open(path, 'w') as f:
        for certificate in certificates:
            f.write("%s\n" % certificate)
def not_expired_or_invalid(pem_as_string: str) -> bool:
    """
    Check that a certificate parses and has not yet expired.
    :param pem_as_string: x509 PEM formatted certificate
    :return: False when certificate is expired or cannot be parsed
    """
    pem_as_byte = str.encode(pem_as_string)
    try:
        cert = x509.load_pem_x509_certificate(pem_as_byte, default_backend())
    except ValueError:
        # Lazy %-style logging args; the unused 'as ve' binding is gone.
        logging.info("Unable to parse the following certificate because it "
                     "contains an invalid value:\n%s", pem_as_string)
        return False
    # Bug fix: not_valid_after is a naive UTC datetime (cryptography API),
    # so compare against UTC rather than naive local time — the old
    # datetime.now() could misjudge certificates near expiry by the local
    # UTC offset.
    if datetime.utcnow() > cert.not_valid_after:
        logging.info("Expired certificate with serial number '%s'.", cert.serial_number)
        return False
    return True
def _find_trusted_service_providers() -> Dict[str, list]:
    """
    Extract list of all trusted service providers
    :return: dict of trusted service providers by country code
    """
    providers_by_country = {}
    url = 'http://esignature.ec.europa.eu/efda/tl-browser/api/v1/search/tsp_list'
    resp = requests.get(url)
    _assert_status_code(resp.status_code, 200)
    for company in resp.json():
        for service in company['services']:
            qualified = service['qServiceTypes']
            # Only QSeal and QWAC providers are relevant for PSD2.
            if any("QCertESeal" in s for s in qualified) \
                    or any("QWAC" in s for s in qualified):
                providers_by_country.setdefault(service['countryCode'], []) \
                    .append(service['serviceName'])
    logging.info('List of trusted service providers\n')
    logging.info(
        "%s",
        json.dumps(
            providers_by_country,
            sort_keys=True,
            indent=4
        )
    )
    return providers_by_country
def _collect_certificates(country_code: str, service_names: list) -> Set[str]:
    """
    Downloads X509 certificates of trusted service providers.
    :param country_code: country code alpha-2
    :param service_names: collection of trusted service provider names
    :return: collection of x509 certificates as String
    """
    certificates = set()
    url = f"http://esignature.ec.europa.eu/efda/tl-browser/api/v1/browser/download/{country_code}"
    resp = requests.get(url)
    _assert_status_code(resp.status_code, 200)
    # utf-8-sig strips a UTF-8 BOM if present before XML parsing.
    resp.encoding = "utf-8-sig"
    content = resp.text
    dom = _create_xml_root_node(content)
    for service_name in service_names:
        # NOTE(review): XPath string literals have no entity escaping, so
        # replacing '"' with '&quot;' will simply fail to match any name that
        # actually contains a double quote (such names need concat()-based
        # splitting instead). Verify whether quoted names occur in practice.
        escaped_service_name = service_name.replace('"', '&quot;')
        xpath = (
            f".//ServiceInformation[ServiceName[Name[text()=\"{escaped_service_name}\"]]]"
            '/ServiceDigitalIdentity/DigitalId/X509Certificate/text()'
        )
        for certificate in dom.xpath(xpath):
            certificate = _to_pem_format(certificate)
            certificates.add(certificate)
    return certificates
def _assert_status_code(actual: int, expected: int):
"""
Asserts the HTTP status code.
:param actual: acutal HTTP status code
:param expected: expected HTTP status code
"""
if not actual == expected:
msg = f"HTTP Status Code expected {actual} to be {expected}."
raise AssertionError(msg)
def _create_xml_root_node(content: str):
    """
    Parse an XML document string into an element tree.
    :param content: XML document as a string
    :return: XML root node
    """
    # Strip the default namespace declaration (first occurrence only) so
    # that plain, un-prefixed XPath expressions work against the tree.
    without_default_ns = re.sub(' xmlns="[^"]+"', '', content, count=1)
    return etree.fromstring(without_default_ns.encode('utf-8'))
def _to_pem_format(cert_as_string: str) -> str:
"""
:param cert_as_string: certificate as string
:return: x509 PEM formatted certificate
"""
if "\n" not in cert_as_string:
cert_as_string = _wrap(cert_as_string, 65)
pem_as_string = f"-----BEGIN CERTIFICATE-----\n{cert_as_string}\n-----END CERTIFICATE-----"
return pem_as_string
def _wrap(text: str, max_width: int) -> str:
s = ''
for i in range(0, len(text), max_width):
s = s + text[i:i + max_width] + '\n'
return s.rstrip("\n")
if __name__ == '__main__':
    # CLI options are parsed from the module docstring by docopt.
    arguments = docopt(__doc__)
    certificates_file_path = arguments['--output']
    verbose = arguments['--verbose']
    if verbose:
        logging.basicConfig(format='%(message)s', level=logging.INFO)
    certs = collect()
    # Keep only certificates that parse and have not expired.
    not_expired_certs = [c for c in certs if not_expired_or_invalid(c)]
    cert_count = len(not_expired_certs)
    expired_cert_count = len(certs) - cert_count
    logging.info('Found %s valid certificates and %s expired certificates.', cert_count, expired_cert_count)
    write_certs_to_file(not_expired_certs, certificates_file_path)
|
from aiohttp import web
from aiohttp_session import setup as setup_session
from aiohttp_session.cookie_storage import EncryptedCookieStorage
import base64
from cryptography import fernet
import aiohttp_jinja2
import jinja2
from aiohttp_security import setup as setup_security
from aiohttp_security import SessionIdentityPolicy
from aiopg.sa import create_engine
from aiopg import create_pool
from .routes import setup_routes
from . import settings
from .db_auth import DBAuthorizationPolicy
async def create_app():
    """Build the aiohttp app: templates, DB engine, sessions, auth, static, routes."""
    app = web.Application()
    aiohttp_jinja2.setup(
        app,
        loader=jinja2.PackageLoader('blog', 'templates')
    )
    app['db'] = await create_engine(dsn=settings.DATABASE)
    # A fresh Fernet key is generated on every start-up, so encrypted-cookie
    # sessions do not survive an application restart.
    fernet_key = fernet.Fernet.generate_key()
    secret_key = base64.urlsafe_b64decode(fernet_key)
    setup_session(app, EncryptedCookieStorage(secret_key))
    setup_security(app, SessionIdentityPolicy(), DBAuthorizationPolicy(app['db']))
    app.router.add_static('/blog/static/',
                          path=str('blog/static'),
                          name='static')
    app['static_root_url'] = '/blog/static'
    setup_routes(app)
    return app
|
# Before running with
# $ flask run or $ python run.py
# make sure to run these:
# $ export FLASK_APP=run.py
# $ export FLASK_ENV=development
from app import app
# Run the Flask development server when invoked directly.
if __name__ == "__main__":
    app.run()
|
import requests
from crawler.models import *
def crawl(graph_url, company):
    """Page through a Facebook Graph feed URL, saving each post (with its
    like count and comma-separated '#'-tag list) as a post_info row tagged
    with *company*.

    NOTE(review): Python 2 syntax (print statements). The access token is
    hard-coded in the URL below and should live in configuration. The loop
    fetches the "next" page but never reassigns `response` from it, so the
    while loop appears to re-process the first page forever; it would also
    raise KeyError on a post with no "message" or when "paging" has no
    "next". Confirm intended behavior before relying on this.
    """
    r = requests.get(graph_url)
    response = r.json()
    while response["data"]!=[]:
        for a in response["data"]:
            time = a["created_time"]
            post_id = a["id"]
            message = a["message"]
            url_likes = "https://graph.facebook.com/%s/likes?key=value&access_token=176565146081574|9f55220446aa4c2d44560f2ebde2430b&summary=true" %post_id
            print url_likes
            r = requests.get(url_likes)
            response_likes = r.json()
            summary = response_likes["summary"]
            no_of_likes = summary["total_count"]
            # Append a sentinel '#' so the split below always has a tail element.
            message = message+""" #"""
            hash_find = message.split("#")
            len_hash = len(hash_find)
            h = ""
            # Build a comma-separated list of the first word after each '#'.
            for i in range(1,(len_hash-1)):
                hash_find_new = hash_find[i].split(" ")
                hash_find_new[0] = hash_find_new[0].strip()
                h = h+hash_find_new[0]+","
            print h
            data = post_info(message=message,category=h,time=time,id=post_id,no_of_likes = no_of_likes,company=company)
            data.save()
        redir_url = response["paging"]["next"]
        r = requests.get(redir_url)
|
class Solution(object):
    def isPalindrome(self, x):
        """Return True if the integer x reads the same forwards and backwards.

        Negative numbers are never palindromes. Works digit-by-digit with
        integer arithmetic (no string conversion).

        Bug fix: the original started left_div at a fixed 10**9, and its
        left/right divisors crossed before ever reaching numbers with fewer
        than ~6 digits — so e.g. isPalindrome(12) wrongly returned True
        without a single digit comparison.
        """
        if x < 0:
            return False
        # left_div = largest power of 10 <= x (1 when x == 0).
        left_div = 1
        while x // left_div >= 10:
            left_div *= 10
        while left_div > 1:
            # Compare the leading and trailing digits.
            if x // left_div != x % 10:
                return False
            # Strip both digits just compared.
            x = (x % left_div) // 10
            left_div //= 100
        return True

print(Solution().isPalindrome(54145))
import sys
import vdb.testmods as v_testmods
class AttachTest(v_testmods.VtracePythonProcTest):
    """Attach to a child Python process, let it run to completion, and
    verify it exited with the sentinel code 99."""
    modname = 'vdb.testmods.attachtest'
    def runTest(self):
        # Let the traced process run without stopping at events.
        self.trace.setMode('RunForever', True)
        self.runProcess()
        assert( self.trace.getMeta('ExitCode', 0) == 99 )
if __name__ == '__main__':
    # When run as the child process: block until the tester attaches, then
    # exit with the sentinel code 99 checked by AttachTest.runTest.
    v_testmods.waitForTest()
    sys.exit(99)
|
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class TypeEnum(object):
    """Enumeration of supported email body formats.

    Attributes:
        TEXT: plain-text email body.
        HTML: HTML email body.
    """
    TEXT = 'text'
    HTML = 'html'
|
'''
210. Course Schedule II
There are a total of n courses you have to take labelled from 0 to n - 1.
Some courses may have prerequisites, for example, if prerequisites[i] = [ai, bi] this means you must take the course bi before the course ai.
Given the total number of courses numCourses and a list of the prerequisite pairs, return the ordering of courses you should take to finish all courses.
If there are many valid answers, return any of them. If it is impossible to finish all courses, return an empty array.
Example 1:
Input: numCourses = 2, prerequisites = [[1,0]]
Output: [0,1]
Explanation: There are a total of 2 courses to take. To take course 1 you should have finished course 0. So the correct course order is [0,1].
Example 2:
Input: numCourses = 4, prerequisites = [[1,0],[2,0],[3,1],[3,2]]
Output: [0,2,1,3]
Explanation: There are a total of 4 courses to take. To take course 3 you should have finished both courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.
So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3].
Example 3:
Input: numCourses = 1, prerequisites = []
Output: [0]
Constraints:
1 <= numCourses <= 2000
0 <= prerequisites.length <= numCourses * (numCourses - 1)
prerequisites[i].length == 2
0 <= ai, bi < numCourses
ai != bi
All the pairs [ai, bi] are distinct.
'''
class Solution:
    def findOrder(self, numCourses: int, prerequisites: 'List[List[int]]') -> 'List[int]':
        """Return any valid order to take all courses, or [] if impossible.

        DFS post-order topological sort with on-path cycle detection.
        Bug fix: the annotations are string literals because ``typing.List``
        is never imported in this file — unquoted, they raised NameError
        when the class body was evaluated.
        """
        # adjacency list: course -> list of prerequisite courses
        prereq = {course: [] for course in range(numCourses)}
        for course, pre in prerequisites:
            prereq[course].append(pre)
        output = []      # courses in a valid completion order
        # visited: courses that have already been visited and added to the output
        # path: courses on the current dfs path (cycle detection)
        visited, path = set(), set()
        def dfs(course):
            # A course already on the current path means a cycle.
            if course in path:
                return False
            # Already emitted: nothing more to do.
            if course in visited:
                return True
            path.add(course)
            for pre in prereq[course]:
                if not dfs(pre):
                    return False
            path.remove(course)
            # Prerequisites done: the course itself can now be taken.
            visited.add(course)
            output.append(course)
            return True
        for course in range(numCourses):
            if not dfs(course):
                return []
        return output
|
from config import *
from os import path
def note_to_num(full_note: tuple):
    """ (note, pitch) => num """
    note, pitch = full_note[0], full_note[1]
    return base_notes.index(note) + 12 * (pitch - 1)
def num_to_note(num: int):
    """ num => (note, pitch) """
    return [base_notes[num % 12], num // 12 + 1]
def get_strings_from_base_notes(strings_base_notes: list, frets_num: int):
    """ Generate list of notes numbers, for each string, from it's base note """
    result = []
    for base_note in strings_base_notes:
        start = note_to_num(base_note)
        # One note per fret, plus the open string and one extra position.
        result.append(range(start, start + frets_num + 2))
    return result
def string_loc_to_num(strings, string_loc: tuple):
    """
    Convert string location => (string_num, location)
    to note number
    """
    string_idx, fret = string_loc
    # string numbers are 1-based, fret positions are 0-based
    return strings[string_idx - 1][fret]
def song_to_nums(song: list, strings: list):
    """ Convert a song to it's notes numbers """
    nums = []
    for string_loc in song:
        nums.append(string_loc_to_num(strings, string_loc))
    return nums
def nums_to_song_tabs(song_notes_nums: list, strings: list):
    """Convert note numbers to a song — strum locations on the instrument.

    If the song dips below the instrument's range, the whole song is
    transposed up by whole octaves until it fits. Each note is then placed
    on the string with the highest base note that can still reach it.
    """
    if not song_notes_nums:
        return []
    song_tabs = []
    # Bug fix: the lowest note was taken as list(set(...))[0], which relies
    # on set iteration order, not actual ordering — use min() instead.
    song_lowest_note_num = min(song_notes_nums)
    # lowest (open-string) note of each string on the instrument
    instrument_lowest_notes = [string[0] for string in strings]
    instrument_lowest_note_num = min(instrument_lowest_notes)
    # if the lowest note in the song is below the instrument's range:
    if song_lowest_note_num < instrument_lowest_note_num:
        diff = instrument_lowest_note_num - song_lowest_note_num
        song_notes_nums = [num + ((diff // 12 + 1) * 12) for num in song_notes_nums]
    # map each note to the closest string that can play it
    for note_num in song_notes_nums:
        closest_string = None
        smallest_diff = 1000000
        for i, inst_note in enumerate(instrument_lowest_notes):
            note_diff = note_num - inst_note
            # only strings whose open note is at or below the target qualify
            if 0 <= note_diff < smallest_diff:
                smallest_diff = note_diff
                closest_string = i + 1
        song_tabs.append((closest_string, strings[closest_string - 1].index(note_num)))
    return song_tabs
def song_tabs_to_scheme(song_tabs: list, num_of_strings: int):
    """ Convert note locations on the strings, to a tab scheme """
    scheme = []
    for string_num, fret in song_tabs:
        # one row per note: '-' for untouched strings, the fret number otherwise
        row = ['-'] * num_of_strings
        row[string_num - 1] = fret
        scheme.append(row)
    return scheme
def tabs_scheme_to_txt_file(song_name, song_tabs_scheme):
    """ Output tabs scheme to a .txt file """
    # txt_file_loc comes from config (star-imported above).
    num_of_strings = len(song_tabs_scheme[0])
    with open(path.join(txt_file_loc, song_name + ".txt"), 'w') as song_tabs_file:
        for i in range(num_of_strings):
            # we need to reverse the order of the strings for the printout lol
            curr_string = [str(note_scheme[-(i + 1)]) for note_scheme in song_tabs_scheme]
            curr_string_txt = ""
            for j in curr_string:
                # NOTE(review): both branches append the same ' ' + str(j) —
                # presumably one was meant to pad differently so single- and
                # double-digit frets align; confirm intended spacing.
                if len(str(j)) == 1:
                    curr_string_txt += " " + str(j)
                else:
                    curr_string_txt += " " + str(j)
            curr_string_txt += '\n'
            song_tabs_file.writelines(curr_string_txt)
|
from django.test import TestCase
# Create your tests here.
class TestTestCase(TestCase):
    """Smoke test proving the Django test runner itself works."""
    def test_test(self):
        self.assertEqual(True, True)
|
#This class contains all needed items and tools for manipulating and analyze chinese dates
from datetime import datetime
from datetime import timedelta
class Cycle(object):
    """Class make and contain dictionary for all possibly combination with
    heavenly stems and earth branches
    Chinese date have 4 elements:
    -year
    -month
    -day
    -hour
    Each date element equal some combination heavenly stems and earth branches
    date in west world = 1.01.1901
    date in chinese world = (year)[heavenly stems][earth branch](month)......
    example = [[yand wood rat],[ox ying wood],
               [yang earth rooster],[yang fire tiger]]
    ********
    year [yand wood rat]
    ********
    month [ox ying wood]
    ********
    day [yang earth rooster]
    ********
    hour [yang fire tiger
    ********
    Chinese horoscope involves 60 combination of h.stems and e.branches
    Every cycle of year/month/day/hours equal 60.
    Cycle start with [yang wood rat]: yang wood is the first earthly
    branshes element. rat is the first heavenly stems element.
    second element in cycle eaual [heavenly_stems[1],earthly_branches[1]], etc
    """
    # len(earth_branches) = 12 (the twelve zodiac animals)
    earth_branches = ("rat", "ox", "tiger", "rabbit", "dragon",
                      "snake", "horse", "sheep", "monkey", "rooster",
                      "dog", "pig")
    # len(heavenly_stems) = 10 (five elements x yin/yang)
    heavenly_stems = ("yang wood","ying wood","yang fire",
                      "ying fire","yang earth","ying earth",
                      "yang metal","ying metal","yang water","ying water")
    # method of Ppsckovskiy D.V.
    # moon status for date (B)
    # B = Bo + number of month + count of days
    # Bo = moon age on 30Th November of last year
    # moon age of newmoon equal 0
    # {year in Methonic cycle : Bo}
    table_year = {0: 27, 1: 8, 2: 19, 3: 0, 4: 11, 5: 22, 6: 3, 7: 14,
                  8: 25, 9: 6, 10: 17,11: 28, 12: 9,13: 20,14: 2 ,15: 13,
                  16: 24, 17: 5,18: 16}
    # modify table contains {year in Methonic cycle : new-moon datetime string}
    table_new = {0:"23:06 31/01/1919", 7 : "17:20 12/02/1926", 14 : "23:20 25/01/1933",
                 1:"21:34 19/02/1920", 8 : "08:54 2/02/1927", 15 : "00:43 14/02/1934",
                 2:"00:36 8/02/1921", 9 : "20:19 22/01/1928", 16 : "16:27 3/02/1935",
                 3:"23:48 27/01/1922", 10 : "17:55 9/02/1929", 17 : "07:18 24/01/1936",
                 4:"19:07 15/02/1923", 11 : "19:07 29/01/1930", 18 : "07:34 11/02/1937",
                 5:"01:38 5/02/1924", 12 : "13:10 17/02/1931",
                 6:"14:45 24/01/1925", 13 : "14:45 6/02/1932"}
    # lcm(10, 12) = 60: tiling the tuples pairs stem i%10 with branch i%12
    # for each of the 60 sexagenary-cycle positions.
    heavtocycle = heavenly_stems * 6
    earthtocycle = earth_branches * 5
    comb_dict = {x:y for x,y in enumerate(zip(heavtocycle,earthtocycle))}
class Date(datetime, Cycle):
    """A datetime that can render its Chinese (sexagenary) representation
    for year, month, day and hour."""
    def __init__(self, *kwrgs):
        # NOTE(review): datetime is immutable — its state is set in __new__,
        # so this super().__init__ call is effectively a no-op; the extra
        # attributes below are what this subclass actually adds.
        super(Date, self).__init__(*kwrgs)
        #new cycle of year started in 1924
        self.start_year = 1924
        #new cycle of hours and days started in 16 dec 1923 23:00
        self.start_day = datetime(1923,12,16, 23, 0)
        #number of year in methonic cycle equal year % 19
        self.methon = self.year % 19
        #moon age (B) by method of Ppsckovskiy D.V.
        #for every year in methonic cycle
        self.b = self.table_year[self.methon]
        self.wyear = datetime.strptime(self.table_new[self.methon], "%H:%M %d/%m/%Y")
        #very important thing timedelta for each cycle in methonic cycle
        self.delta = timedelta(0, 2952)
        #different between same years in methonic cycle
        self.big_delta = timedelta(6939, 25368)
    def __str__(self):
        a = (super(Date,self).__str__()).center(30)
        return "{0}\n{1:.<15}[{2}]\n{3:.<15}[{4}]\n{5:.<15}[{6}]\n{7:.<15}[{8}]\n".format(a,"year", self.convert_year(),"month",self.convert_month(),
                                                                                          "day",self.convert_day(),"hour", self.convert_hour())
    def __repr__(self):
        # Omit the hour element when the time-of-day is exactly midnight-ish.
        if self.hour != 0 and self.minute != 0:
            lst = [self.convert_year(),self.convert_month(),
                   self.convert_day(), self.convert_hour()]
        else :
            lst = [self.convert_year(),self.convert_month(),
                   self.convert_day() ]
        lst = ["{0} {1}".format(x[0], x[1]) for x in lst ]
        a = super(Date,self).__repr__()
        return "{0}\n{1}".format(a,lst)
    def date_of_myear(self):
        """Return date of new moon year"""
        # NOTE(review): on Python 3 this division yields a float number of
        # 19-year cycles; timedelta * float works but suggests Python 2-era code.
        count_cycle = (self.year - self.wyear.year) / 19
        new_moon_year = self.wyear + count_cycle * (self.big_delta + self.delta)
        return new_moon_year
    def convert_year(self):
        """ Return chinese representation of year """
        # Before the lunar new year, the date still belongs to the prior year.
        if self.date_of_myear() <= self:
            num = (self.year - 1924) % 60
        else:
            num = ((self.year - 1924 -1) % 60)
        return self.comb_dict[num]
    def num_of_index(self):
        """This function return the index of first moon month in self.comb_dict
        for a given year.For certain heavenly stems of year have certain index
        the first moon month
        """
        #heavenly stem of current year
        year = self.convert_year()[0]
        lst = [[(0,5),2],[(1,6),14],[(2,7), 26],
               [(3,8),38],[(4,9), 51]]
        for c in lst:
            # NOTE(review): `or self.heavenly_stems[c[0][1]]` is a truthy
            # non-empty string, so this condition is always True and the
            # loop always returns 2 on the first iteration. The commented
            # block below shows the comparisons that were likely intended.
            if year == self.heavenly_stems[c[0][0]] or self.heavenly_stems[c[0][1]]:
                return c [1]
        """
        if year == self.heavenly_stems[0] or year == self.heavenly_stems[5]:
            indexa = 2
        elif year == self.heavenly_stems[1] or year == self.heavenly_stems[6]:
            indexa = 14
        elif year == self.heavenly_stems[2] or year == self.heavenly_stems[7]:
            indexa = 26
        elif year == self.heavenly_stems[3] or year == self.heavenly_stems[8]:
            indexa = 38
        elif year == self.heavenly_stems[4] or year == self.heavenly_stems[9]:
            indexa = 51
        return indexa
        """
    def convert_month(self):
        """ Return chinese representation of month """
        # mean synodic month length in days
        month_delta = timedelta(29.530588853)
        f = []
        for c in range(12):
            f.append(self.date_of_myear() + c * month_delta)
        month_index = filter(lambda x: self > x, f)
        # NOTE(review): Python 2 semantics — on Python 3 filter() returns an
        # iterator, so the truthiness test and month_index[-1] below fail.
        if month_index :
            index = (self.num_of_index() + f.index(month_index[-1])) % 60
        else:
            index = (self.num_of_index() - 1) % 60
        return self.comb_dict[index]
    def convert_day(self):
        """ Return chinese representation of day """
        # whole days since the cycle epoch, wrapped to the 60-element cycle
        return self.comb_dict[(self - self.start_day).days % 60]
    def convert_hour(self):
        """ Return chinese representation of hour """
        # each cycle element covers a two-hour block since the epoch
        delta = self - self.start_day
        delta = int((delta.total_seconds() / 3600) / 2) % 60
        return self.comb_dict[delta]
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from collections import defaultdict
from dataclasses import dataclass
from typing import Any
from pants.backend.python.util_rules.pex import Pex, PexProcess, PexRequest
from pants.backend.tools.yamllint.subsystem import Yamllint
from pants.core.goals.lint import LintFilesRequest, LintResult
from pants.core.util_rules.partitions import Partition, Partitions
from pants.engine.fs import Digest, DigestSubset, MergeDigests, PathGlobs
from pants.engine.internals.native_engine import FilespecMatcher, Snapshot
from pants.engine.process import FallibleProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.util.dirutil import find_nearest_ancestor_file, group_by_dir
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
class YamllintRequest(LintFilesRequest):
    """Lint-files request wired to the yamllint subsystem's options."""
    tool_subsystem = Yamllint
@dataclass(frozen=True)
class PartitionInfo:
    """Per-partition metadata: the config snapshot governing its files, if any."""

    config_snapshot: Snapshot | None

    @property
    def description(self) -> str:
        """Human-readable partition label: the config path or a default marker."""
        return self.config_snapshot.files[0] if self.config_snapshot else "<default>"
@dataclass(frozen=True)
class YamllintConfigFilesRequest:
    """Request to locate the yamllint config files governing these file paths."""

    filepaths: tuple[str, ...]
@dataclass(frozen=True)
class YamllintConfigFiles:
    """Discovered config files plus a source-dir -> nearest-config-file map."""

    # Snapshot containing every discovered config file.
    snapshot: Snapshot
    # Maps each source directory to the config file nearest above it.
    source_dir_to_config_files: FrozenDict[str, str]
# @TODO: This logic is very similar, but not identical to the one for scalafmt. It should be generalized and shared.
@rule
async def gather_config_files(
    request: YamllintConfigFilesRequest, yamllint: Yamllint
) -> YamllintConfigFiles:
    """Gather yamllint configuration files.

    Globs for the configured config-file name in every source directory and
    all of its ancestors, then maps each source directory to the nearest
    config file at or above it (if any).
    """
    source_dirs = frozenset(os.path.dirname(path) for path in request.filepaths)
    # Include "" (the build root) plus every ancestor directory so a config
    # anywhere above a source file can be found.
    source_dirs_with_ancestors = {"", *source_dirs}
    for source_dir in source_dirs:
        source_dir_parts = source_dir.split(os.path.sep)
        source_dir_parts.pop()
        while source_dir_parts:
            source_dirs_with_ancestors.add(os.path.sep.join(source_dir_parts))
            source_dir_parts.pop()
    config_file_globs = [
        os.path.join(dir, yamllint.config_file_name) for dir in source_dirs_with_ancestors
    ]
    config_files_snapshot = await Get(Snapshot, PathGlobs(config_file_globs))
    config_files_set = set(config_files_snapshot.files)
    source_dir_to_config_file: dict[str, str] = {}
    for source_dir in source_dirs:
        # Walk upward from each source dir to its closest config file.
        config_file = find_nearest_ancestor_file(
            config_files_set, source_dir, yamllint.config_file_name
        )
        if config_file:
            source_dir_to_config_file[source_dir] = config_file
    return YamllintConfigFiles(config_files_snapshot, FrozenDict(source_dir_to_config_file))
@rule
async def partition_inputs(
    request: YamllintRequest.PartitionRequest, yamllint: Yamllint
) -> Partitions[Any, PartitionInfo]:
    """Split the requested files into one partition per governing config file,
    plus a default partition for files with no config."""
    if yamllint.skip:
        return Partitions()
    # Only lint files matching the subsystem's include/exclude globs.
    matching_filepaths = FilespecMatcher(
        includes=yamllint.file_glob_include, excludes=yamllint.file_glob_exclude
    ).matches(request.files)
    config_files = await Get(
        YamllintConfigFiles, YamllintConfigFilesRequest(filepaths=tuple(sorted(matching_filepaths)))
    )
    default_source_files: set[str] = set()
    source_files_by_config_file: dict[str, set[str]] = defaultdict(set)
    # Bucket each file under its directory's nearest config file, or into the
    # default bucket when no config applies.
    for source_dir, files_in_source_dir in group_by_dir(matching_filepaths).items():
        files = (os.path.join(source_dir, name) for name in files_in_source_dir)
        if source_dir in config_files.source_dir_to_config_files:
            config_file = config_files.source_dir_to_config_files[source_dir]
            source_files_by_config_file[config_file].update(files)
        else:
            default_source_files.update(files)
    # One snapshot per config file so each partition carries only its own config.
    config_file_snapshots = await MultiGet(
        Get(Snapshot, DigestSubset(config_files.snapshot.digest, PathGlobs([config_file])))
        for config_file in source_files_by_config_file
    )
    return Partitions(
        (
            *(
                Partition(tuple(sorted(files)), PartitionInfo(config_snapshot=config_snapshot))
                for files, config_snapshot in zip(
                    source_files_by_config_file.values(), config_file_snapshots
                )
            ),
            *(
                (
                    Partition(
                        tuple(sorted(default_source_files)), PartitionInfo(config_snapshot=None)
                    ),
                )
                if default_source_files
                else ()
            ),
        )
    )
@rule(desc="Lint using yamllint", level=LogLevel.DEBUG)
async def run_yamllint(
    request: YamllintRequest.Batch[str, PartitionInfo], yamllint: Yamllint
) -> LintResult:
    """Run yamllint over one partition's files, using its config file if any."""
    # Build the yamllint PEX from the subsystem's requirements.
    yamllint_bin = await Get(Pex, PexRequest, yamllint.to_pex_request())
    partition_info = request.partition_metadata
    snapshot = await Get(Snapshot, PathGlobs(request.elements))
    # Sandbox inputs: the files to lint, the tool, and (optionally) the config.
    input_digest = await Get(
        Digest,
        MergeDigests(
            (
                snapshot.digest,
                yamllint_bin.digest,
                *(
                    (partition_info.config_snapshot.digest,)
                    if partition_info.config_snapshot
                    else ()
                ),
            )
        ),
    )
    process_result = await Get(
        FallibleProcessResult,
        PexProcess(
            yamllint_bin,
            argv=(
                # Pass -c only when a config file governs this partition.
                *(
                    ("-c", partition_info.config_snapshot.files[0])
                    if partition_info.config_snapshot
                    else ()
                ),
                *yamllint.args,
                *snapshot.files,
            ),
            input_digest=input_digest,
            description=f"Run yamllint on {pluralize(len(request.elements), 'file')}.",
            level=LogLevel.DEBUG,
        ),
    )
    return LintResult.create(request, process_result)
def rules():
    """Return every rule needed to register yamllint with the lint goal."""
    return list(collect_rules()) + list(YamllintRequest.rules())
|
# coding=utf-8
from hashlib import md5
from json import loads
from base64 import b64encode, b64decode
import re
import sys
import urllib.request
from urllib.parse import urljoin, urlencode
import http.cookiejar
def hash_md5(str_input):
    """Return the hex MD5 digest of a UTF-8 encoded string."""
    digest = md5(str_input.encode('utf-8'))
    return digest.hexdigest()
def from_jsonp(jsonp_str):
    """Strip the JSONP wrapper (one character each side) and parse the JSON payload."""
    trimmed = jsonp_str.strip()
    return loads(trimmed[1:-1])
def strip_tags(html_str):
    """Remove every HTML/XML tag from *html_str* (DOTALL so tags may span lines)."""
    tag_pattern = re.compile(r'<[^>]+>', re.S)
    return tag_pattern.sub('', html_str)
def selectExists(lists):
    """Return the first truthy element of *lists*, or None when there is none."""
    return next((item for item in lists if item), None)
def getEncodeKey():
    """Fetch jandan.net's image-hash encryption key (original: 获取煎蛋网加密key).

    Downloads the OOXX page, locates the bundled minified JS file, and pulls
    the app secret string embedded in it.
    """
    html = crawlHtml('http://jandan.net/ooxx')
    # Find the versioned static JS bundle referenced by the page.
    js_reg = re.findall(r'src="//cdn.jandan.net/static/min/[\w\d\.]+.js"', html)
    js_url = 'https:' + js_reg[0][5:-1]
    js_html = crawlHtml(js_url)
    # The secret appears as the second argument of a call: c=xxx(e,"SECRET");
    app_secret = re.findall(r'c=[\w\d\_]+\(e,"[\w\d]+"\);', js_html)
    return app_secret[0].split('"')[1]
def crawlHtml(url, referer='https://jandan.net/', host='jandan.net'):
    """Fetch *url* and return its body decoded as UTF-8 (original: 获取页面源码).

    Installs a cookie-aware opener with browser-like headers. On any failure
    the error is printed and the whole process exits.
    """
    cookie_support = urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar())
    #proxy_support = urllib.request.ProxyHandler({"http":"115.159.50.56:8080"})
    opener = urllib.request.build_opener(cookie_support, urllib.request.HTTPHandler)
    urllib.request.install_opener(opener)
    opener.addheaders = [('User-agent', 'Mozilla/5.0'), ('Accept', '*/*'), ('Referer', referer), ('Host', host)]
    try:
        urlop = opener.open(url)
        html = urlop.read().decode('utf-8')
        urlop.close()
        return html
    except Exception as e:
        print(e, url)
        # NOTE(review): sys.exit(0) reports success on failure — confirm intent.
        sys.exit(0)
def extractPic(n, x):
    """Decode an image hash *n* with key *x* into the real URL path
    (original: 由图片hash获取真实地址).

    RC4-style stream cipher ported from the site's JS: the key schedule is
    derived from MD5 digests of the key and the hash's 4-char prefix; the
    remainder of the hash is base64 payload.
    """
    k = 'DECODE'  # unused; kept from the original JS port
    f = 0         # unused; kept from the original JS port
    x = hash_md5(x)
    w = hash_md5(x[0:16])
    u = hash_md5(x[16:32])
    t = n[0:4]
    # Cipher key: first-half digest concatenated with a salted digest of the prefix.
    r = w + hash_md5(w + t)
    n = n[4:]
    m = b64decode(n)
    # RC4 key-scheduling over the derived key r.
    h = [i for i in range(256)]
    q = [ord(r[j%len(r)]) for j in range(256)]
    o = 0
    for i in range(256):
        o = (o + h[i] + q[i]) % 256
        tmp = h[i]
        h[i] = h[o]
        h[o] = tmp
    v = o = 0
    l = ''
    # Keystream generation XORed over the payload.
    for i in range(len(m)):
        v = (v + 1) % 256
        o = (o + h[v]) % 256
        tmp = h[v]
        h[v] = h[o]
        h[o] = tmp
        l += chr(ord(chr(m[i])) ^ (h[(h[v] + h[o]) % 256]))
    # The first 26 decoded characters are a header; the rest is the URL path.
    return l[26:]
class TransCookie(object):
    """Convert a raw cookie header string into a dict (original: cookie字符串转为字典)."""

    def __init__(self, cookie):
        # BUG FIX: the original `def __init__(self, cookie)` was missing the
        # trailing colon — a SyntaxError that made the module unimportable.
        self.cookie = cookie

    def String2Dict(self):
        """Split ``self.cookie`` on ';' into a {key: value} dict.

        Splits each item on the FIRST '=' only, so values that themselves
        contain '=' (common in cookies) are kept intact — the original
        truncated them at the second '='.
        """
        itemDict = {}
        items = self.cookie.split(';')
        for item in items:
            key, _, value = item.partition('=')
            itemDict[key.strip()] = value
        return itemDict
if __name__ == '__main__':
    # BUG FIX: the original `if __name__ == '__main__'` lacked the colon,
    # a SyntaxError. Interactive helper: paste a cookie string, get a dict.
    cookie = input('输入字符串格式的cookie')
    tc = TransCookie(cookie)
    print(tc)
    print(tc.String2Dict())
|
class Player:
    """Tracks a player's score, games, sets and paddle position/size."""

    def __init__(self, position_x, position_y, dimension_x, dimension_y):
        self.score = 0
        self.game = 0
        self.sets = 0
        self.position_x = position_x
        self.position_y = position_y
        self.dimension_x = dimension_x
        self.dimension_y = dimension_y

    def rein(self):
        """Reset both the score and the game counter."""
        self.score = 0
        self.game = 0

    def rein_score(self):
        """Reset only the score."""
        self.score = 0

    def update_score(self, val):
        """Add *val* when the score exceeds 25, otherwise jump to 15; return the score."""
        self.score = self.score + val if self.score > 25 else 15
        return self.score

    def update_game(self):
        """Record one won game."""
        self.game += 1

    def update_set(self):
        """Record one won set."""
        self.sets += 1
|
from src.Realtime import Realtime
from argparse import ArgumentParser
import os
# Silence TensorFlow C++ INFO logs before TF is imported by src.Realtime.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Command-line options for the realtime pipeline.
ap = ArgumentParser()
ap.add_argument("-d", "--display", type=int, default=0,
    help="Whether or not frames should be displayed")
ap.add_argument("-I", "--input-device", type=int, default=0,
    help="Device number input")
ap.add_argument('-w', '--num-workers', dest='num_workers', type=int,
    default=2, help='Number of workers.')
ap.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
    default=5, help='Size of the queue.')
ap.add_argument('-l', '--logger-debug', dest='logger_debug',
    type=int, default=0, help='Print logger debug')
args = vars(ap.parse_args())
# Launch the realtime pipeline with the parsed CLI options.
Realtime(args).start()
|
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import numpy as np
def f(x0, t, m, k, b):
    """Damped harmonic oscillator ODE: return [x', x''] for state [x, x']."""
    position, velocity = x0
    acceleration = -(b * velocity + k * position) / m
    return [velocity, acceleration]
# Time grid: 0..10 s in 0.1 ms steps.
t = np.arange(0, 10, 0.0001)
x0 = [100, 0]  # initial position (m) and velocity (m/s)
m = 5          # mass
k = 10         # spring constant
b = 2          # damping coefficient
# Integrate the damped oscillator and plot position and velocity vs time.
y = odeint(f, x0, t, args=(m, k, b))
plt.title("Yieeet")
plt.plot(t, [0 for i in np.arange(len(t))], "b:", linewidth=1)
plt.plot(t, y[:,0], "r-", linewidth=2, label="x")
plt.plot(t, y[:,1], "b-", linewidth=2, label="dx/dt")
plt.xlabel("time")
plt.ylabel("meters and meters/second")
plt.legend(loc="upper right")
plt.show()
# def f(x0, t, b):
# x, v = x0
# acc = -2*b*t
# return [v, acc]
#
# a = 4
# b = 2
# x0 = [0, a]
# t = np.arange(0, 4, 0.001)
#
# y = odeint(f, x0, t, args=(b,))
#
# plt.title("Graph of position, velocity, and acceleration")
# plt.plot(t, [0 for i in np.arange(len(t))], "b:", linewidth=1)
# plt.plot(t, y[:,0], "y-", linewidth=1, label="x(t)")
# plt.plot(t, y[:,1], "r-", linewidth=1, label="v(t)")
# plt.plot(t, f(x0, t, b)[1], "b-", linewidth=2, label="a(t)")
# plt.xlabel("time")
# plt.ylabel("meters and meters/second")
# plt.legend()
# plt.show() |
from airflow.models import DAG
from airflow.operators import DummyOperator
def subdag(parent_dag_name, child_dag_name, args):
    """Build a daily sub-DAG named '<parent>.<child>' containing five dummy tasks."""
    child_dag = DAG(
        dag_id='%s.%s' % (parent_dag_name, child_dag_name),
        default_args=args,
        schedule_interval="@daily",
    )
    for task_number in range(1, 6):
        DummyOperator(
            task_id='%s-task-%s' % (child_dag_name, task_number),
            default_args=args,
            dag=child_dag,
        )
    return child_dag
|
# Read one whitespace-separated line and convert each token to int.
x = input()
y = list(map(int, x.split()))
print(y)
# Show that the elements really are ints after the conversion.
print(type(y[0]))
|
# Exercício 8.5 - Livro
def pesquise(lista, valor):
    """Return the index of *valor* in *lista*, or None when absent.

    Single scan via EAFP: the original did a membership test followed by
    ``index()``, walking the list twice.
    """
    try:
        return lista.index(valor)
    except ValueError:
        return None
# Demo: look up 'M' in a small mixed list (prints its index, 2).
lista = ['Vinicius', 10, 'M']
print(pesquise(lista, 'M'))
|
import time
from numpy import random
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
# Upper bounds for the randomly generated item weights and profits.
WEIGHT_DOMAIN = 100
PROFIT_DOMAIN = 1000
# Fraction used to size the knapsack capacity relative to the item count.
CAPACITY_PROBABILITY = 0.3
# defines the number of times each algorithm will be processed to obtain the
# average time
num_rounds = 5
# alg_results = dict()
def knapSackNaive(W, P, c):
    """0/1 knapsack by plain recursion: max profit from items (W, P) within capacity c."""
    def solve(capacity, item_count):
        # No items left or no capacity: nothing more to gain.
        if item_count == 0 or capacity == 0:
            return 0
        weight = W[item_count - 1]
        if weight > capacity:
            return solve(capacity, item_count - 1)
        skip = solve(capacity, item_count - 1)
        take = P[item_count - 1] + solve(capacity - weight, item_count - 1)
        return max(take, skip)
    return solve(c, len(W))
def knapSackMem(W, P, c):
    """0/1 knapsack with memoized recursion (cache keyed on (capacity, item index))."""
    cache = {}
    def solve(capacity, item_count):
        if item_count == 0 or capacity == 0:
            return 0
        key = (capacity, item_count - 1)
        if key not in cache:
            weight = W[item_count - 1]
            if weight > capacity:
                cache[key] = solve(capacity, item_count - 1)
            else:
                cache[key] = max(
                    P[item_count - 1] + solve(capacity - weight, item_count - 1),
                    solve(capacity, item_count - 1),
                )
        return cache[key]
    return solve(c, len(W))
def knapSacTab(W, P, c):
    """0/1 knapsack via bottom-up DP table; return the maximal profit.

    BUG FIX: the original did ``W.insert(0, 0)`` / ``P.insert(0, 0)``,
    permanently mutating the caller's lists on every call. We now use index
    arithmetic instead and leave the arguments untouched.
    """
    n = len(W)
    # T[i][j] = best profit using the first i items within capacity j.
    T = [[0] * (c + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        weight, profit = W[i - 1], P[i - 1]
        for j in range(1, c + 1):
            if weight <= j:
                T[i][j] = max(T[i - 1][j], profit + T[i - 1][j - weight])
            else:
                T[i][j] = T[i - 1][j]
    return T[-1][-1]
def plot(data):
    """Plot average runtime vs input size, one line per algorithm.

    *data* maps "AlgorithmName##size" keys to average times; the key is
    split back apart to recover the Algorithm and Size columns.
    """
    df = pd.DataFrame.from_dict(data, orient='index', columns=['Time'])
    df['Algorithm'] = [i.split("##")[0] for i in df.index]
    df['Size'] = [int(i.split("##")[1]) for i in df.index]
    # Defines font size and line width
    sns.set(font_scale=1, style="ticks", rc={"lines.linewidth": 2})
    # Defines plot size
    plt.rcParams['figure.figsize'] = [20, 10]
    chart = sns.lineplot(x='Size', y='Time', hue='Algorithm', data=df)
    # plt.yscale('log')
    chart.set(xticks=[i for i in df.Size])
    plt.show()
# calculates the executions average time
def avgTime(func, size, debug=True):
t = 0
for i in range(num_rounds):
random.seed(size+i)
W = list(random.randint(WEIGHT_DOMAIN, size=size))
P = list(random.randint(PROFIT_DOMAIN, size=size))
c = random.randint(int(CAPACITY_PROBABILITY*size)*WEIGHT_DOMAIN)
start = time.time()
p = func(W, P, c)
end = time.time()
t += end - start
if debug:
# create a variable to store the debug results
if 'DR' not in globals():
global DR
DR = dict()
# add the result or check if it is the same
if (size, i) not in DR:
DR[(size, i)] = p
else:
assert p == DR[(size, i)]
return t / num_rounds
def run():
    """Benchmark the selected knapsack algorithms over several sizes, then plot."""
    # defines the algorithms to be processed
    algorithms = [knapSackMem, knapSacTab]
    sizes = [5, 10, 15, 20, 25]
    mapSizeToTime = dict()
    # IDIOM FIX: enumerate instead of range(len(...)); dead commented-out
    # alternative configurations removed.
    for collect_number, size in enumerate(sizes, start=1):
        print(f"Starting collect {collect_number}")
        # map list size to algorithm average time
        for algorithm in algorithms:
            print(' > ', algorithm.__name__)
            mapSizeToTime[f"{algorithm.__name__ }##{size}"] = \
                avgTime(algorithm, size, True)
    print("Finish data collection")
    plot(mapSizeToTime)
# Script entry point: run the full benchmark and show the plot.
if __name__ == "__main__":
    run()
|
"""Tests for markdown parser"""
import pytest
from github import PullRequest
from markdown import parse_linked_issues, ParsedIssue
# Defaults used when an issue reference does not name an org/repo explicitly.
ORG = "default_org"
REPO = "default_repo"
@pytest.mark.parametrize("input_text, expected_output", [
    # Linked issues should be parsed, even ending with comma
    [" see issue https://github.com/jlord/sheetsee.js/issues/26, which is ", [
        ParsedIssue(issue_number=26, org="jlord", repo="sheetsee.js", closes=False),
    ]],
    # Test that closes will close an issue, and a lack of close will not close it
    ["Closes #9876543, related to #76543, fixes #44", [
        ParsedIssue(issue_number=9876543, org=ORG, repo=REPO, closes=True),
        ParsedIssue(issue_number=76543, org=ORG, repo=REPO, closes=False),
        ParsedIssue(issue_number=44, org=ORG, repo=REPO, closes=True),
    ]],
    # Test that #xx and org/repo#xx can coexist and not be parsed as each other
    ["see #769, mitodl/mitxpro#94 and mitodl/open-discussions#76, and also #123 for more info", [
        ParsedIssue(issue_number=769, org=ORG, repo=REPO, closes=False),
        ParsedIssue(issue_number=94, org="mitodl", repo="mitxpro", closes=False),
        ParsedIssue(issue_number=76, org="mitodl", repo="open-discussions", closes=False),
        ParsedIssue(issue_number=123, org=ORG, repo=REPO, closes=False),
    ]],
    # No issue links should be parsed
    ["nothing to see here", []],
    # Parse GH-
    ["We don't use issue links like GH-543", [
        ParsedIssue(issue_number=543, org=ORG, repo=REPO, closes=False)
    ]],
    # ignore issues which are links
    ["Catch buffer overruns [#4104](https://github-redirect.dependabot.com/python-pillow/Pillow/issues/4104)", []],
    # start issue at beginning of string
    ["#654 is the issue", [
        ParsedIssue(issue_number=654, org=ORG, repo=REPO, closes=False),
    ]]
])
def test_parser(input_text, expected_output):
    """Test that parser is producing expected output"""
    # The PR body carries the markdown under test; everything else is fixed.
    pr = PullRequest(
        body=input_text,
        number=1234,
        title="title",
        updatedAt=None,
        org=ORG,
        repo=REPO,
        url=f"https://github.com/{ORG}/{REPO}.git",
    )
    assert parse_linked_issues(pr) == expected_output
|
#!/usr/bin/python
import sys
def main():
    """Read blank-line-separated groups of words from stdin and print each
    group sorted by reversed spelling, right-aligned.

    BUG FIX: replaced the Python-2-only ``print ''`` statement with the
    ``print('')`` call form, which behaves identically on both Python 2 and 3.
    """
    group = []
    for line in sys.stdin:
        line = line.strip()
        if 0 == len(line):
            # A blank line terminates the current group.
            sort(group)
            group = []
            print('')
            continue
        group.append(line)
    # Flush the final group (input need not end with a blank line).
    sort(group)
def sort(group):
    """Print the words in *group* ordered by their reversed spelling,
    each right-aligned to the longest word's length.

    BUG FIX: replaced the Python-2-only ``print '...'`` statement with the
    ``print(...)`` call form, which behaves identically on both versions.
    """
    if not group:
        return
    max_len = max(len(word) for word in group)
    reversed_words = [''.join(reversed(word)) for word in group]
    for drow in sorted(reversed_words):
        word = ''.join(reversed(drow))
        print('{}{}'.format(' ' * (max_len - len(word)), word))
# Run only when executed as a script (filter reads stdin).
if __name__ == '__main__':
    main()
|
# coding=utf-8
from flask import Flask, render_template, request
import os
def cambios_input_datos(X_test):
    """Run the saved CNN model on *X_test* and return the prediction as JSON.

    BUG FIX: the original referenced ``prediction_df`` before it was ever
    assigned (NameError) and called an un-imported ``load``; the model is now
    unpickled explicitly, the file handle is closed via ``with``, and the
    prediction is wrapped in a DataFrame before serialising.
    NOTE(review): the .sav file is assumed to be a pickle — confirm it was
    not saved with another serializer (e.g. joblib/keras).
    """
    import pickle
    import pandas as pd

    path = "../models/"
    with open(path + "best_model_CNN_C_opt_RMSprop.sav", "rb") as model_file:
        loaded_model_CNN_C_ok = pickle.load(model_file)
    prediction = loaded_model_CNN_C_ok.predict(X_test)
    prediction_df = pd.DataFrame(prediction)
    return prediction_df.to_json()
# WSGI application instance.
app = Flask(__name__)
@app.route('/')
def landing_page():
    """Serve the landing page."""
    return render_template('index.html')
@app.route('/hypothesis')
def scatter():
    """Serve the hypothesis/visualisation page."""
    return render_template('hypothesis.html')
@app.route('/mriimage', methods=['POST', 'GET'])
def image():
    """Accept an MRI image upload and run the model prediction on it."""
    if request.method == 'POST':
        # check if the post request has the file part
        image = request.files['filename']
        # NOTE(review): FileStorage.save() takes a single destination path;
        # here "." is the path and "image.png" lands in the buffer-size
        # parameter — this looks broken, confirm the intended destination.
        image.save(".", "image.png")
        # NOTE(review): FileStorage has no .reshape(); the upload presumably
        # needs decoding into a numpy array first — verify this route works.
        X_test = image.reshape(1,-1)
        predict = cambios_input_datos(X_test)
        print("Status == ", predict)
    return render_template('mriimage.html')
if __name__ == '__main__':
    # Local development server only (debug mode; not for production).
    app.run(host = '127.0.0.1', port = 6060, debug=True)
|
# FRACTAL TREE 3D-alternative coloured branch
import pygame
import os
import math
import pygame.gfxdraw
# colours co-ordination...(rgb value)
black, red, blue = (0, 0, 0), (255, 26, 26), (21, 71, 200)
# Ask SDL to center the window on screen before init.
os.environ["SDL_VIDEO_CENTERED"] = '1'
pygame.init()
pygame.display.set_caption("Fractal Tree 3D")
#window size...
width, height = 800, 800
screen = pygame.display.set_mode((width, height))
# Clock used to cap the frame rate in the main loop.
clock = pygame.time.Clock()
def fractalTree(position, angle, z_value, n_value, direction, color=black, depth=0):
    """Recursively draw a fractal tree branch.

    position  -- remaining recursion depth (stops at 0)
    angle     -- spread between child branches (radians)
    z_value   -- remaining total length budget for this subtree
    n_value   -- (x, y) start point of this branch
    direction -- drawing direction in radians (screen coordinates)
    Alternates red/blue branch colours by depth parity.
    """
    branch_ratio = 0.3
    # This branch consumes 30% of the remaining length budget.
    branch = z_value * branch_ratio
    angle_x = branch * math.cos(direction)
    angle_y = branch * math.sin(direction)
    (x, y) = n_value
    next_position = (x + angle_x, y + angle_y)
    pygame.draw.line(screen, color, n_value, next_position)
    if position > 0:
        # Alternate colours by recursion depth.
        if depth % 2 == 1:
            color1 = red
        else:
            color1 = blue
        new = z_value * (1 - branch_ratio)
        #recursive call....
        fractalTree(position-1, angle, new, next_position, direction-angle, color1, depth+1)
        # NOTE(review): the +20 here is 20 RADIANS added to the right child's
        # direction — likely meant to be degrees or omitted; confirm intent.
        fractalTree(position-1, angle, new, next_position, direction+angle+20, color1, depth+1)
# Angle increment applied each frame.
speed = 0.01
def main():
    """Animation loop: redraw the fractal tree each frame with a slowly
    increasing branch angle until the window is closed."""
    angle = 0
    while True:
        clock.tick(120)  # cap at 120 FPS
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # BUG FIX: the original called pygame.quit() but kept
                # looping, so the next screen.fill() crashed on the
                # uninitialised display; leave the loop instead.
                pygame.quit()
                return
        angle += speed
        screen.fill(black)
        fractalTree(10, angle, height * 0.9, (width//2, width-50), -math.pi/2)
        pygame.display.update()
if __name__ == "__main__":
    # DEAD CODE FIX: a stray `pygame.QUIT` attribute access (a no-op
    # expression) after main() was removed.
    main()
|
# Copyright (c) 2018 Amdocs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import six
def extra_specs_formatter(extra_specs):
    """Flatten an extra-specs object into [{"keyName": ..., "value": ...}] entries."""
    formatted = []
    for key, val in six.iteritems(extra_specs.extra_specs):
        formatted.append({"keyName": key, "value": val})
    return formatted
def convert_vmsize_aai(vmsize):
    """Map an Azure VM-size object onto the AAI flavor schema."""
    return {
        'name': vmsize.name,
        'vcpus': vmsize.number_of_cores,
        'disk': vmsize.os_disk_size_in_mb,
        'ram': vmsize.memory_in_mb,
    }
|
class Item(object):
    """A knapsack item with a value and a weight.

    BUG FIX: the original also defined ``weight()`` and ``value()`` methods,
    but the instance attributes assigned in ``__init__`` shadow them, so any
    call like ``item.weight()`` raised ``TypeError: 'int' object is not
    callable``. The broken accessors are removed; read the attributes directly.
    """

    def __init__(self, valuea, weighta):
        self.value = valuea
        self.weight = weighta
# Histogram of 'From' lines in mail.txt, keyed on the first two characters
# of the second-to-last field (presumably the hour of the timestamp — TODO
# confirm against the mailbox format).
handle = open('mail.txt')
counts=dict()
for line in handle:
    if not line.startswith('From'): continue
    l=line.split()
    if len(l)<3:continue
    t=l[len(l)-2][0:2]
    counts[t]=counts.get(t,0)+1
# Print the counts sorted by key.
lst=sorted(counts.items())
for v,k in lst[:]:
    print(v,k)
print('change #1')
# For each test case: build b[i][j] = a[i] + a[j] and count square submatrices
# of b (for every size k) whose element sum equals x, using sliding strip sums.
T = int(input())
for _ in range(T):
    n,x = map(int,input().split())
    a = list(map(int,input().split()))
    b = [[0 for _ in range(n)] for _ in range(n)]
    for i in range(n):
        for j in range(n):
            b[i][j] = a[i]+a[j]
    count = 0
    # stripSum[i][j] = sum of k consecutive rows of column j starting at row i.
    stripSum = [[None] * n for i in range(n)]
    # NOTE(review): k starts at 0, so the first pass counts "empty" 0x0
    # submatrices whenever x == 0 — confirm whether k should start at 1.
    for k in range(n):
        for j in range(n):
            # Vertical strip sums per column, computed with a sliding window.
            Sum = 0
            for i in range(k):
                Sum += b[i][j]
            stripSum[0][j] = Sum
            for i in range(1, n - k + 1):
                Sum += (b[i + k - 1][j] - b[i - 1][j])
                stripSum[i][j] = Sum
        for i in range(n - k + 1):
            # Slide a k-wide window over the strip sums of row-block i.
            Sum = 0
            for j in range(k):
                Sum += stripSum[i][j]
            if Sum == x:
                count += 1
            for j in range(1, n - k + 1):
                Sum += (stripSum[i][j + k - 1] - stripSum[i][j - 1])
                if Sum == x:
                    count += 1
    print(count)
# Tutorial script: the same list printed with a while loop and a for loop.
liste1 = [3,6,123,54,927]
i = 0
while i < len(liste1):
    print(liste1[i])
    i += 1
liste2 = [3,6,123,54,927]
for inhalt in liste2:
    print(inhalt)
# Iterating a dict yields its keys; look up each value explicitly.
dic = {"Marke":"VW","Modell":"Golf","Baujahr":"2011","Preis":5000}
for inhalt in dic:
    print(inhalt,dic[inhalt])
# Divide a user-supplied value by each list element, skipping zeros.
liste = [12,18,3,6,46,234,23]
wert = eval(input("Welcher Wert soll dividiert werden?:"))
for n in liste:
    if n == 0:
        print("Fehler: Zahlen dürfen nicht durch 0 geteilt werden.")
        continue
    print(wert/n)
# quadratliste = [1,2,3,4,5,6,7,8,9,10]
#i = 0
#while i < 11:
#    print(i*i)
#    i = i + 1
#for n in range(1,11):
#    print(n*n)
# City list printed whole, element-wise, and by index.
cities = ["Köln","Bonn","Berlin","Geilenkirchen","Dillingen","Ulm","Antwerpen","Brüssel","Düsseldorf","Gangelt"]
i = 0
print(cities)
for n in cities:
    print(n)
while i < 10:
    print(cities[i])
    i += 1
# Doubling loop; repeats until the user answers anything but "ja".
# NOTE(review): eval() on raw user input executes arbitrary code — input
# should be parsed with int()/float() instead.
while True:
    zahl = eval(input("Welche Zahl möchten Sie verdoppeln?:"))
    print("Doppelter Wert: ", zahl*2)
    weiter = input("Möchten Sie fortfahren? (ja/nein) ")
    if weiter != "ja":
        break
def begruessung():
    """Print a German welcome greeting."""
    print("Herzlich Willkommen!")
begruessung()
|
"""
Search And Substitute (SAS)
"""
import re
from typing import Dict
def sas(content: str, pattern_and_repl: Dict[str, str]) -> str:
    """Apply each regex pattern -> replacement pair to *content* in turn."""
    result = content
    for pattern, replacement in pattern_and_repl.items():
        result = re.sub(pattern, replacement, result)
    return result
if __name__ == "__main__":
    # No CLI behaviour; this module is import-only.
    pass
|
def xhr_intercept_response(match_url, output, request_intercept_script=''):
    """Build a JS snippet that hooks XMLHttpRequest.send and appends JSON
    responses whose URL contains *match_url* to a DOM node with id *output*.

    *request_intercept_script* is injected verbatim into the send() hook.
    """
    template = """
    const intercept = (urlmatch, callback) => {{
        let send = XMLHttpRequest.prototype.send;
        XMLHttpRequest.prototype.send = function() {{
            {request_intercept_script}
            this.addEventListener('readystatechange', function() {{
                if (this.responseURL.includes(urlmatch) && this.readyState === 4) {{
                    callback(this);
                }}
            }}, false);
            send.apply(this, arguments);
        }};
    }};
    let output = response => {{
        var intercepted = document.getElementById('{output}')
        if (intercepted === null) {{
            intercepted = document.createElement('div');
            intercepted.id = '{output}';
            intercepted.responses = []
            document.body.appendChild(intercepted);
        }}
        if (response.status === 204) {{
            intercepted.responses.push(null)
        }} else {{
            intercepted.responses.push(JSON.parse(response.responseText))
        }}
    }};
    intercept('{match_url}', output);
    """
    # Explicit keyword arguments instead of the fragile format(**locals()).
    return template.format(
        match_url=match_url,
        output=output,
        request_intercept_script=request_intercept_script,
    )
|
# Interleave each character of the word with a separator: "g*u*i*t*a*r*e".
# BUG FIX: the original while-loop incremented i up to len(chaine) and then
# read chaine[i], raising IndexError on the final iteration.
chaine = "guitare"
caractere = "*"
nchaine = chaine[0]
for lettre in chaine[1:]:
    nchaine = nchaine + caractere + lettre
print(nchaine)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.