text
stringlengths
1
93.6k
for enhancing_iteration in tqdm(range(num_iterations), desc="Enhancing iterations"):
# Opening the video and extracting essential properties
video = cv2.VideoCapture(str(mp4))
original_video_fps = video.get(cv2.CAP_PROP_FPS)
width, height = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
original_num_frames = sum(video.read()[0] for _ in range(int(video.get(cv2.CAP_PROP_FRAME_COUNT))))
# Informing the user of video details before processing
print(f"Video Name: {original_seq_name}")
print(f"Original Frame Rate (FPS): {original_video_fps}")
print(f"Original Total Number of Frames: {original_num_frames}")
img_array = []
# Processing each set of 4 frames for frame rate enhancement
for t in tqdm(range(0, original_num_frames - 3), desc="Processing frames"):
video.set(cv2.CAP_PROP_POS_FRAMES, t)
_, rawFrame0 = video.read()
_, rawFrame1 = video.read()
_, rawFrame2 = video.read()
_, rawFrame3 = video.read()
# If any frame in the set of 4 is missing, stop processing
if any(frame is None for frame in [rawFrame0, rawFrame1, rawFrame2, rawFrame3]):
break
# Convert frames to tensors and move them to GPU
frame0 = TF.to_tensor(rawFrame0)[None, ...].cuda()
frame1 = TF.to_tensor(rawFrame1)[None, ...].cuda()
frame2 = TF.to_tensor(rawFrame2)[None, ...].cuda()
frame3 = TF.to_tensor(rawFrame3)[None, ...].cuda()
# Use the trained model to predict enhanced frames
with torch.no_grad():
out = self.model(frame0, frame1, frame2, frame3)
# Special handling for the very first
if t == 0:
img_array += [tensor2rgb(frame0)[0]] * 2 + [tensor2rgb(frame1)[0]]
img_array += [tensor2rgb(out)[0], tensor2rgb(frame2)[0]]
# Special handling for the last sets of frames
if t == original_num_frames - 4:
img_array += [tensor2rgb(frame3)[0]] * 2
video.release()
# Decide the output video's fps
new_num_frames = len(img_array)
output_fps = (
new_num_frames * original_video_fps
) / original_num_frames # Compute the fps that keeps video playback constant (duration of video)
if (not keep_original_duration) and (custom_fps is not None) and (custom_fps >= 1):
output_fps = custom_fps
# Create and write frames to the output video
avi_outname = f"{original_seq_name}_{enhancing_iteration}.avi"
new_num_frames = len(img_array)
print(f"Output filename: {avi_outname}")
print(f"New Total Number of Frames: {new_num_frames}")
cv2writer = cv2.VideoWriter(
avi_outname,
cv2.VideoWriter_fourcc(*"DIVX"), # NOTE: codec issues mean we have to export as avi using DIVX
output_fps,
(width, height),
)
for frame in img_array:
cv2writer.write(frame)
cv2writer.release()
# Convert the AVI video to MP4 format using ffmpeg (NOTE: We use ffmpeg because we have codec issues with cv2 and mp4)
mp4_outname = avi_outname.replace(".avi", ".mp4")
cmd = ["ffmpeg", "-i", avi_outname, mp4_outname, "-y"]
subprocess.run(cmd)
# Append the output path and prepare for the next iteration if needed
mp4 = mp4_outname
yield Path(mp4_outname)
# <FILESEP>
'''
main.py
----------
Matthew Chatham
June 6, 2018
Given a company's landing page on Glassdoor and an output filename, scrape the
following information about each employee review:
Review date
Employee position
Employee location
Employee status (current/former)
Review title
Number of helpful votes
Pros text
Cons text
Advice to mgmt text