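########## remove the skeleton arrows and foot-contact spheres from the visualizer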
for arrow in skeleton_rec_list:
vis.remove_geometry(arrow)
for arrow in skeleton_input_list:
vis.remove_geometry(arrow)
for sphere in foot_sphere_rec_list:
vis.remove_geometry(sphere)
####################################### render results #############################
if args.render:
img_save_path_mesh_skel_rec = os.path.join(args.render_save_path, 'mesh_skel')
img_save_path_mesh_noisy = os.path.join(args.render_save_path, 'input')
os.makedirs(img_save_path_mesh_skel_rec, exist_ok=True)
os.makedirs(img_save_path_mesh_noisy, exist_ok=True)
H, W = 1080, 1920  # render viewport size, matching the 1920x1080 color images
########## read kinect color camera intrinsics
if args.dataset == 'egobody':
with open(os.path.join(args.dataset_root, 'kinect_cam_params', 'kinect_{}'.format(view), 'Color.json'), 'r') as f:
color_cam = json.load(f)
elif args.dataset == 'prox':
with open(os.path.join(args.dataset_root, 'calibration', 'Color.json'), 'r') as f:
color_cam = json.load(f)
[f_x, f_y] = color_cam['f']  # focal lengths (pixels)
[c_x, c_y] = color_cam['c']  # principal point (pixels)
camera, camera_pose, light = create_render_cam(cam_x=c_x, cam_y=c_y, fx=f_x, fy=f_y)
if args.dataset == 'egobody':
rgb_img_root = os.path.join(args.dataset_root, 'kinect_color', recording_name, view)
elif args.dataset == 'prox':
rgb_img_root = os.path.join(args.dataset_root, 'recordings', recording_name, 'Color')
rgb_frame_list = sorted(os.listdir(rgb_img_root))  # color frame filenames in sorted order
img_frame_idx = 0  # running index into rgb_frame_list (used in the prox branches below)
print('[INFO] saving images...')
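########## render loop: overlay the noisy input body and the reconstructed body + skeleton on each color frame, then save both images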
for bs in tqdm(range(n_seq)):
for t in range(clip_len_rec):
if args.dataset == 'egobody':
img_path = os.path.join(rgb_img_root, frame_name_list[bs, t] + '.jpg')
elif args.dataset == 'prox':
img_path = os.path.join(rgb_img_root, rgb_frame_list[img_frame_idx])
cur_img = cv2.imread(img_path)
cur_img = cur_img[:, :, ::-1]  # opencv loads BGR; convert to RGB
if args.dataset == 'prox':
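# undistort using the calibration's camera matrix and distortion coefficients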
cur_img = cv2.undistort(cur_img.copy(), np.asarray(color_cam['camera_mtx']), np.asarray(color_cam['k']))
cur_img = cv2.flip(cur_img, 1)  # flipCode=1: horizontal (left-right) flip
########## read joint visibility mask
cur_joint_mask_vis = mask_joint_vis_list[bs, t] # [22]
cur_mask_joint_id = np.where(cur_joint_mask_vis == 0)[0].tolist()  # ids of joints marked occluded in this frame
########## create pyrender scenes
scene_rec_body = create_pyrender_scene(camera, camera_pose, light)
scene_noisy_body = create_pyrender_scene(camera, camera_pose, light)
scene_rec_skel = create_pyrender_scene(camera, camera_pose, light)
scene_noisy_skel = create_pyrender_scene(camera, camera_pose, light)
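# separate scenes so the reconstructed body, reconstructed skeleton, and noisy input body are rendered and composited independently below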
################### add body mesh
body_mesh_rec = create_pyrender_mesh(verts=smpl_verts_rec_list[bs, t], faces=smplx_neutral.faces, trans=cam2world, material=material_body_rec_vis)
body_mesh_input = create_pyrender_mesh(verts=smpl_verts_input_list[bs, t], faces=smplx_neutral.faces, trans=cam2world, material=material_body_noisy)
scene_rec_body.add(body_mesh_rec, 'mesh')
scene_noisy_body.add(body_mesh_input, 'mesh')
################## add body skeleton
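# the occluded-joint ids and predicted foot-contact labels are forwarded to the skeleton visualization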
skeleton_mesh_rec_list = create_pyrender_skel(joints=rec_ric_data_rec_list_from_smpl[bs, t], add_trans=np.linalg.inv(cam2world),
mask_scheme='video', mask_joint_id=cur_mask_joint_id,
add_contact=True, contact_lbl=contact_lbl_rec_list[bs, t])
for mesh in skeleton_mesh_rec_list:
scene_rec_skel.add(mesh, 'pred_joint')
################## render images
r = pyrender.OffscreenRenderer(viewport_width=W, viewport_height=H, point_size=1.0)
####### render: reconstructed body/skeleton and noisy input overlay
img_rec_body = render_img(r, scene_rec_body, alpha=0.9)
img_rec_skel = render_img(r, scene_rec_skel, alpha=1.0)
render_img_input = render_img_overlay(r, scene_noisy_body, cur_img)
r.delete()  # release the per-frame offscreen renderer to avoid leaking GPU resources
render_img_rec = pil_img.fromarray(cur_img.astype(np.uint8))  # start from the original color frame
render_img_rec.paste(img_rec_body, (0, 0), img_rec_body)  # composite the rendered body using its alpha as the mask
render_img_rec.paste(img_rec_skel, (0, 0), img_rec_skel)  # composite the rendered skeleton on top
if args.dataset == 'egobody':
render_img_input.save(os.path.join(img_save_path_mesh_noisy, frame_name_list[bs, t] + '.jpg'))
render_img_rec.save(os.path.join(img_save_path_mesh_skel_rec, frame_name_list[bs, t] + '.jpg'))
elif args.dataset == 'prox':
render_img_input.save(os.path.join(img_save_path_mesh_noisy, rgb_frame_list[img_frame_idx]))
render_img_rec.save(os.path.join(img_save_path_mesh_skel_rec, rgb_frame_list[img_frame_idx]))
img_frame_idx += 1
########################################### final metrics ###############################################
for recording_name in test_recording_name_list:
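# concatenate the accumulated metric arrays for this recording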
skating_list[recording_name] = np.concatenate(skating_list[recording_name], axis=0)
acc_list[recording_name] = np.concatenate(acc_list[recording_name], axis=0)
if args.dataset == 'egobody':
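# egobody-only metrics: joint errors vs. ground truth, occlusion masks, acceleration error, and ground penetration frequency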
acc_error_list[recording_name] = np.concatenate(acc_error_list[recording_name], axis=0)
joint_mask_list[recording_name] = np.concatenate(joint_mask_list[recording_name], axis=0)
gmpjpe_list[recording_name] = np.concatenate(gmpjpe_list[recording_name], axis=0)
mpjpe_list[recording_name] = np.concatenate(mpjpe_list[recording_name], axis=0)
mpjpe_list_vis[recording_name] = np.concatenate(mpjpe_list_vis[recording_name], axis=0)
mpjpe_list_occ[recording_name] = np.concatenate(mpjpe_list_occ[recording_name], axis=0)
ground_pene_freq_list[recording_name] = np.concatenate(ground_pene_freq_list[recording_name], axis=0)