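# Assumed imports for this snippet (they are not shown in this excerpt). The code
# below also expects model_det / model_lm / model_hp / model_gaze to be defined
# elsewhere as paths to the corresponding OpenVINO IR files (without extension).
import math
import random

import cv2
import numpy as np
from openvino.inference_engine import IECore
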
def intersection_check(p1, p2, p3, p4):
    # Segment intersection test based on cross products: p1-p2 and p3-p4 cross
    # when the endpoints of each segment lie on opposite sides of the other.
    tc1 = (p1[0] - p2[0]) * (p3[1] - p1[1]) + (p1[1] - p2[1]) * (p1[0] - p3[0])
    tc2 = (p1[0] - p2[0]) * (p4[1] - p1[1]) + (p1[1] - p2[1]) * (p1[0] - p4[0])
    td1 = (p3[0] - p4[0]) * (p1[1] - p3[1]) + (p3[1] - p4[1]) * (p3[0] - p1[0])
    td2 = (p3[0] - p4[0]) * (p2[1] - p3[1]) + (p3[1] - p4[1]) * (p3[0] - p2[0])
    return tc1 * tc2 < 0 and td1 * td2 < 0
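
# Illustrative sanity check (not part of the original demo): the diagonals of a
# square cross each other, while two parallel horizontal segments do not.
assert intersection_check((0, 0), (10, 10), (0, 10), (10, 0))
assert not intersection_check((0, 0), (10, 0), (0, 5), (10, 5))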

def draw_gaze_line(img, coord1, coord2, laser_flag):
    if laser_flag == False:
        # Simple line
        cv2.line(img, coord1, coord2, (0, 0, 255), 2)
    else:
        # Laser mode :-) -- draw the beam on a black canvas, thick and dark first,
        # then thinner and brighter (t = 19, 17, ..., 1), and OR-blend it onto img
        beam_img = np.zeros(img.shape, np.uint8)
        for t in range(20)[::-2]:
            cv2.line(beam_img, coord1, coord2, (0, 0, 255 - t * 10), t * 2)
        img |= beam_img

def draw_spark(img, coord):
    # Draw 20 short yellow rays of random length in random directions around coord
    for i in range(20):
        angle = random.random() * 2 * math.pi
        dia = random.randrange(10, 60)
        x = coord[0] + int(math.cos(angle) * dia - math.sin(angle) * dia)
        y = coord[1] + int(math.sin(angle) * dia + math.cos(angle) * dia)
        cv2.line(img, coord, (x, y), (0, 255, 255), 2)
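
# Illustrative sketch (not part of the original code) of how these helpers are
# meant to be combined in the main loop: draw each gaze ray, and when two rays
# cross, draw a spark at the crossing. The variable names below are assumptions.
#
# draw_gaze_line(out_img, eye1, target1, laser_flag)
# draw_gaze_line(out_img, eye2, target2, laser_flag)
# if spark_flag and intersection_check(eye1, target1, eye2, target2):
#     draw_spark(out_img, crossing_point)   # crossing point computed elsewhere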

def usage():
    print("""
Gaze estimation demo
'f': Flip image
'l': Laser mode on/off
's': Spark mode on/off
'b': Boundary box on/off
""")

def main():
    usage()

    boundary_box_flag = True

    # Prep for face detection
    ie = IECore()
    net_det = ie.read_network(model=model_det+'.xml', weights=model_det+'.bin')
    input_name_det = next(iter(net_det.input_info))                        # Input blob name "data"
    input_shape_det = net_det.input_info[input_name_det].tensor_desc.dims  # [1,3,384,672]
    out_name_det = next(iter(net_det.outputs))                             # Output blob name "detection_out"
    exec_net_det = ie.load_network(network=net_det, device_name='CPU', num_requests=1)
    del net_det
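
    # Illustrative sketch (not in the original code): how a BGR frame can be
    # preprocessed to the detector's [1,3,384,672] input and run synchronously.
    # The helper name and the 0.5 confidence threshold are assumptions.
    #
    # def detect_faces(frame, threshold=0.5):
    #     n, c, h, w = input_shape_det                                # [1, 3, 384, 672]
    #     blob = cv2.resize(frame, (w, h)).transpose((2, 0, 1)).reshape((n, c, h, w))
    #     res = exec_net_det.infer(inputs={input_name_det: blob})[out_name_det]
    #     # SSD-style output [1,1,N,7]: image_id, label, conf, x1, y1, x2, y2 (normalized)
    #     return [det[3:7] for det in res.reshape(-1, 7) if det[2] > threshold]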

    # Preparation for landmark detection
    net_lm = ie.read_network(model=model_lm+'.xml', weights=model_lm+'.bin')
    input_name_lm = next(iter(net_lm.input_info))                          # Input blob name
    input_shape_lm = net_lm.input_info[input_name_lm].tensor_desc.dims     # [1,3,60,60]
    out_name_lm = next(iter(net_lm.outputs))                               # Output blob name "embd/dim_red/conv"
    out_shape_lm = net_lm.outputs[out_name_lm].shape                       # 3x [1,1]
    exec_net_lm = ie.load_network(network=net_lm, device_name='CPU', num_requests=1)
    del net_lm

    # Preparation for headpose detection
    net_hp = ie.read_network(model=model_hp+'.xml', weights=model_hp+'.bin')
    input_name_hp = next(iter(net_hp.input_info))                          # Input blob name
    input_shape_hp = net_hp.input_info[input_name_hp].tensor_desc.dims     # [1,3,60,60]
    out_name_hp = next(iter(net_hp.outputs))                               # Output blob name
    out_shape_hp = net_hp.outputs[out_name_hp].shape                       # [1,70]
    exec_net_hp = ie.load_network(network=net_hp, device_name='CPU', num_requests=1)
    del net_hp
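
    # Illustrative sketch (not in the original code): the landmark and head-pose networks
    # both take a 60x60 face crop, so the same preprocessing is typically reused for each.
    # `face_crop` is an assumed name for a face region cut out of the frame.
    #
    # face = cv2.resize(face_crop, (60, 60)).transpose((2, 0, 1)).reshape(input_shape_lm)
    # res_lm = exec_net_lm.infer(inputs={input_name_lm: face})[out_name_lm]   # landmark positions
    # res_hp = exec_net_hp.infer(inputs={input_name_hp: face})                # head-pose angles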

    # Preparation for gaze estimation
    net_gaze = ie.read_network(model=model_gaze+'.xml', weights=model_gaze+'.bin')
    input_shape_gaze = [1, 3, 60, 60]      # Input shape for the image inputs (hard-coded)
    exec_net_gaze = ie.load_network(network=net_gaze, device_name='CPU')
    del net_gaze
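
    # Illustrative sketch (not in the original code): driving the gaze network once eye
    # crops and head-pose angles are available. The input names below match OpenVINO's
    # gaze-estimation-adas-0002, which is an assumption since the model path is not shown.
    #
    # res_gaze = exec_net_gaze.infer(inputs={
    #     'left_eye_image' : left_eye_blob,     # [1,3,60,60] crop of the left eye
    #     'right_eye_image': right_eye_blob,    # [1,3,60,60] crop of the right eye
    #     'head_pose_angles': np.array([[yaw, pitch, roll]], dtype=np.float32)})
    # gaze_vec = next(iter(res_gaze.values()))[0]   # (x, y, z) gaze direction vector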

    # Open a USB webcam
    cam = cv2.VideoCapture(0)
    camx, camy = [(1920, 1080), (1280, 720), (800, 600), (480, 480)][1]    # Camera resolution; index 1 selects 1280x720
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, camx)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camy)

    laser_flag = True
    flip_flag = True
    spark_flag = True

    while True:
        ret, img = cam.read()          # img won't be modified
        if ret == False:
            break
        if flip_flag == True:
            img = cv2.flip(img, 1)     # Flip the image horizontally
        out_img = img.copy()           # out_img will be drawn on to produce the display image