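# Gaze-estimation pipeline (OpenVINO demo script excerpt). The elided top of
# the script is assumed to have already:
#   - imported cv2, math and numpy (as np),
#   - loaded the four models into exec_net_det (face detection), exec_net_lm
#     (facial landmarks), exec_net_hp (head pose) and exec_net_gaze (gaze
#     estimation), along with their input/output blob names and input shapes
#     (input_shape_det, input_shape_lm, input_shape_gaze),
#   - defined _H and _W as the height/width indices of the (presumably NCHW)
#     input shapes,
#   - read a frame into img, copied it to out_img, and set boundary_box_flag
#     and spark_flag.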
img1 = cv2.resize(img, (input_shape_det[_W], input_shape_det[_H]))
img1 = img1.transpose((2, 0, 1))    # Change data layout from HWC to CHW
img1 = img1.reshape(input_shape_det)
res_det = exec_net_det.infer(inputs={input_name_det: img1})    # Detect faces
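
# res_det[out_name_det] is presumably an SSD-style [1, 1, N, 7] detection
# blob, so indexing [0][0] below iterates over the N candidate detections.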
gaze_lines = []
for obj in res_det[out_name_det][0][0]:    # obj = [image_id, label, conf, xmin, ymin, xmax, ymax]
    if obj[2] > 0.75:                      # Confidence > 75%
        xmin = abs(int(obj[3] * img.shape[1]))
        ymin = abs(int(obj[4] * img.shape[0]))
        xmax = abs(int(obj[5] * img.shape[1]))
        ymax = abs(int(obj[6] * img.shape[0]))
        class_id = int(obj[1])
        face = img[ymin:ymax, xmin:xmax]   # Crop the face image
        if boundary_box_flag:
            cv2.rectangle(out_img, (xmin, ymin), (xmax, ymax), (255, 255, 0), 2)

        # Find facial landmarks (to locate the eyes)
        face1 = cv2.resize(face, (input_shape_lm[_W], input_shape_lm[_H]))
        face1 = face1.transpose((2, 0, 1))
        face1 = face1.reshape(input_shape_lm)
        res_lm = exec_net_lm.infer(inputs={input_name_lm: face1})    # Run landmark detection
        lm = res_lm[out_name_lm][0][:8].reshape(4, 2)    # [[left0x, left0y], [left1x, left1y], [right0x, right0y], [right1x, right1y]]
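        # lm holds coordinates normalized to [0, 1] relative to the face crop;
        # they are scaled by face.shape when the eye sizes and centers are
        # computed below.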

        # Estimate head orientation (yaw=Y, pitch=X, roll=Z)
        res_hp = exec_net_hp.infer(inputs={input_name_hp: face1})    # Run head pose estimation
        yaw   = res_hp['angle_y_fc'][0][0]
        pitch = res_hp['angle_p_fc'][0][0]
        roll  = res_hp['angle_r_fc'][0][0]
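        # The angle_*_fc outputs are Tait-Bryan angles in degrees (these blob
        # names match head-pose-estimation-adas-0001).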

        _X = 0
        _Y = 1
        # Landmark position memo (left to right): lm[1] (eye) lm[0] (nose) lm[2] (eye) lm[3]
        eye_sizes   = [abs(int((lm[0][_X] - lm[1][_X]) * face.shape[1])),
                       abs(int((lm[3][_X] - lm[2][_X]) * face.shape[1]))]    # Eye widths (px) in the cropped face image
        eye_centers = [[int((lm[0][_X] + lm[1][_X]) / 2 * face.shape[1]),
                        int((lm[0][_Y] + lm[1][_Y]) / 2 * face.shape[0])],
                       [int((lm[3][_X] + lm[2][_X]) / 2 * face.shape[1]),
                        int((lm[3][_Y] + lm[2][_Y]) / 2 * face.shape[0])]]   # Eye center coordinates in the cropped face image
        if eye_sizes[0] < 4 or eye_sizes[1] < 4:
            continue    # Skip faces whose eyes are too small to crop reliably

        ratio = 0.7
        eyes = []
        for i in range(2):
            # Crop the eye images
            x1 = int(eye_centers[i][_X] - eye_sizes[i] * ratio)
            x2 = int(eye_centers[i][_X] + eye_sizes[i] * ratio)
            y1 = int(eye_centers[i][_Y] - eye_sizes[i] * ratio)
            y2 = int(eye_centers[i][_Y] + eye_sizes[i] * ratio)
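            # Note: near the face-crop border x1/y1 can go negative and x2/y2
            # can exceed the crop, making the slice below empty and cv2.resize()
            # fail. A defensive clamp (an addition, not in the original demo):
            x1, y1 = max(x1, 0), max(y1, 0)
            x2 = min(x2, face.shape[1])
            y2 = min(y2, face.shape[0])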
            eyes.append(cv2.resize(face[y1:y2, x1:x2].copy(), (input_shape_gaze[_W], input_shape_gaze[_H])))    # Crop and resize

            # Draw eye boundary boxes
            if boundary_box_flag:
                cv2.rectangle(out_img, (x1 + xmin, y1 + ymin), (x2 + xmin, y2 + ymin), (0, 255, 0), 2)

            # Rotate the eyes around the Z axis to keep them level
            if roll != 0.:
                rotMat = cv2.getRotationMatrix2D((int(input_shape_gaze[_W] / 2), int(input_shape_gaze[_H] / 2)), roll, 1.0)
                eyes[i] = cv2.warpAffine(eyes[i], rotMat, (input_shape_gaze[_W], input_shape_gaze[_H]), flags=cv2.INTER_LINEAR)

            eyes[i] = eyes[i].transpose((2, 0, 1))    # Change data layout from HWC to CHW
            eyes[i] = eyes[i].reshape((1, 3, 60, 60))
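            # The hard-coded (1, 3, 60, 60) matches the eye-image inputs of
            # gaze-estimation-adas-0002 (presumably equal to input_shape_gaze).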

        hp_angle = [yaw, pitch, 0]    # Head pose angles in degrees; roll is zeroed because the eye crops were de-rolled above
        res_gaze = exec_net_gaze.infer(inputs={'left_eye_image'  : eyes[0],
                                               'right_eye_image' : eyes[1],
                                               'head_pose_angles': hp_angle})    # Gaze estimation
        gaze_vec = res_gaze['gaze_vector'][0]    # The result is an (x, y, z) vector in an orthogonal coordinate system (not yaw/pitch/roll) and is not normalized
        gaze_vec_norm = gaze_vec / np.linalg.norm(gaze_vec)    # Normalize the gaze vector

        # Rotate the gaze vector back by the roll angle, since the eye images
        # were de-rolled before inference
        vcos = math.cos(math.radians(roll))
        vsin = math.sin(math.radians(roll))
        tmpx =  gaze_vec_norm[0] * vcos + gaze_vec_norm[1] * vsin
        tmpy = -gaze_vec_norm[0] * vsin + gaze_vec_norm[1] * vcos
        gaze_vec_norm = [tmpx, tmpy]
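        # The 2D rotation above maps (x, y) from the level-eye frame back to
        # the image frame:
        #   [x']   [ cos(roll)  sin(roll)] [x]
        #   [y'] = [-sin(roll)  cos(roll)] [y]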

        # Store the gaze line coordinates
        for i in range(2):
            coord1 = (eye_centers[i][_X] + xmin, eye_centers[i][_Y] + ymin)
            coord2 = (eye_centers[i][_X] + xmin + int(gaze_vec_norm[0] * 3000),
                      eye_centers[i][_Y] + ymin - int(gaze_vec_norm[1] * 3000))
            gaze_lines.append([coord1, coord2, False])    # [start, end, spark_flag]; False = not yet crossed
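            # The unit vector is scaled by 3000 px so the line spans the whole
            # frame; y is negated above to convert from the y-up gaze coordinate
            # system to the y-down image coordinate system.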

# Gaze line intersection check (for sparking)
if spark_flag:
    for g1 in range(len(gaze_lines)):
        for g2 in range(g1 + 1, len(gaze_lines)):
            if gaze_lines[g1][2] or gaze_lines[g2][2]:
                continue    # Skip if either line has already been marked as crossed
            p1, p2 = gaze_lines[g1][0], gaze_lines[g1][1]    # Start/end points of line g1 (renamed from x1/y1: these are points, not scalars)
            p3, p4 = gaze_lines[g2][0], gaze_lines[g2][1]    # Start/end points of line g2
            if intersection_check(p1, p2, p3, p4):
                l1 = line(p1, p2)
                l2 = line(p3, p4)
                x, y = intersection(l1, l2)    # Calculate the crossing coordinate
                gaze_lines[g1][1] = [int(x), int(y)]    # Clip both lines at the crossing point
                gaze_lines[g1][2] = True
                gaze_lines[g2][1] = [int(x), int(y)]
                gaze_lines[g2][2] = True
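
# ----------------------------------------------------------------------------
# line(), intersection() and intersection_check() are defined elsewhere in the
# script (and must run before the loop above). A minimal sketch of one common
# formulation, assuming line() returns the (A, B, C) coefficients of
# A*x + B*y = C; treat it as an illustration, not the demo's exact code:

def line(p1, p2):
    # Coefficients (A, B, C) of the line A*x + B*y = C through p1 and p2
    A = p1[1] - p2[1]
    B = p2[0] - p1[0]
    C = p2[0] * p1[1] - p1[0] * p2[1]
    return A, B, C

def intersection(l1, l2):
    # Intersection point of two coefficient-form lines via Cramer's rule
    d = l1[0] * l2[1] - l1[1] * l2[0]    # Zero when the lines are parallel
    x = (l1[2] * l2[1] - l1[1] * l2[2]) / d
    y = (l1[0] * l2[2] - l1[2] * l2[0]) / d
    return x, y

def intersection_check(p1, p2, p3, p4):
    # True if segment p1-p2 strictly crosses segment p3-p4 (cross-product sign test)
    d1 = (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0])
    d2 = (p2[0] - p1[0]) * (p4[1] - p1[1]) - (p2[1] - p1[1]) * (p4[0] - p1[0])
    d3 = (p4[0] - p3[0]) * (p1[1] - p3[1]) - (p4[1] - p3[1]) * (p1[0] - p3[0])
    d4 = (p4[0] - p3[0]) * (p2[1] - p3[1]) - (p4[1] - p3[1]) * (p2[0] - p3[0])
    return d1 * d2 < 0 and d3 * d4 < 0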