    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
# Video recording thread
def record_video(stop_event):
    # time.sleep(5)
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, FRAME_WIDTH)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT)
    cap.set(cv2.CAP_PROP_FPS, FRAME_RATE)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(TEMP_VIDEO_FILE, fourcc, FRAME_RATE, (FRAME_WIDTH, FRAME_HEIGHT))

    print("Recording video...")
    while not stop_event.is_set():
        ret, frame = cap.read()
        if ret:
            out.write(frame)
            cv2.imshow('Recording Video', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):  # press Q to close the camera window
                stop_event.set()
        else:
            break

    print("Video recording finished.")
    cap.release()
    out.release()
    cv2.destroyAllWindows()
# Merge audio and video
def merge_audio_video(audio_file, video_file, output_file):
    print("Merging audio and video...")
    video_in = ffmpeg.input(video_file)
    audio_in = ffmpeg.input(audio_file)
    ffmpeg.output(video_in, audio_in, output_file, vcodec='copy', acodec='aac', strict='experimental').run(overwrite_output=True)
    print(f"Merge complete, file saved as: {output_file}")
# Main function
def main():
    stop_event = threading.Event()

    # Create the audio and video recording threads
    audio_thread = threading.Thread(target=record_audio, args=(stop_event,))
    video_thread = threading.Thread(target=record_video, args=(stop_event,))

    print("Press Enter to start recording...")
    input()  # wait for the user to press Enter
    print("Recording... Press Enter again to stop.")
    audio_thread.start()
    video_thread.start()

    input()  # wait for the user to press Enter again
    stop_event.set()
    audio_thread.join()
    video_thread.join()

    # # Merge audio and video
    # merge_audio_video(TEMP_AUDIO_FILE, TEMP_VIDEO_FILE, OUTPUT_FILE)

    # # Clean up temporary files
    # os.remove(TEMP_AUDIO_FILE)
    # os.remove(TEMP_VIDEO_FILE)

    print("Recording finished!")
# -------------- Load Qwen2-VL model ------------
# Default: load the model on the available device(s)
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2-VL-2B-Instruct", torch_dtype="auto", device_map="auto"
)

# ------- Limit the input resolution to reduce GPU memory usage -------
min_pixels = 256 * 28 * 28
max_pixels = 512 * 28 * 28
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)
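# Note: each visual token in Qwen2-VL covers a 28x28 pixel patch, so the min_pixels/max_pixels
# bounds above resize every image to use between 256 and 512 visual tokens.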
# --------------------------------------

# -------- Load the SenseVoice speech-recognition model --------
model_dir = r"E:\2_PYTHON\Project\GPT\QWen\pretrained_models\SenseVoiceSmall"
model_senceVoice = AutoModel(model=model_dir, trust_remote_code=True)
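# A minimal usage sketch (not part of the original snippet): with FunASR's AutoModel, the recorded
# WAV could be transcribed roughly as below; the language/use_itn values are assumptions.
# from funasr.utils.postprocess_utils import rich_transcription_postprocess
# res = model_senceVoice.generate(input=TEMP_AUDIO_FILE, cache={}, language="auto", use_itn=True)
# asr_text = rich_transcription_postprocess(res[0]["text"])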
if __name__ == "__main__":
    while True:
        main()

        folder_path = "./Test_QWen2_VL/"
        os.makedirs(folder_path, exist_ok=True)
        file_path = os.path.join(folder_path, "captured_image.jpg")  # path for the saved frame

        # Grab the middle frame of the recorded video to use as the image input
        cap = cv2.VideoCapture(TEMP_VIDEO_FILE)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_index = int(total_frames // 2)

        # Seek to the target frame
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index)
        ret, frame = cap.read()
        if not ret:
            print(f"Failed to read frame index {frame_index}")
        else:
            # Save the frame to disk
            cv2.imwrite(file_path, frame)
        cap.release()
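        # A minimal sketch (not part of the original snippet) of how the saved frame could be fed to
        # the Qwen2-VL model loaded above; the prompt text and max_new_tokens are assumptions.
        # from qwen_vl_utils import process_vision_info
        # messages = [{"role": "user", "content": [
        #     {"type": "image", "image": file_path},
        #     {"type": "text", "text": "Describe this image."},
        # ]}]
        # text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        # image_inputs, video_inputs = process_vision_info(messages)
        # inputs = processor(text=[text], images=image_inputs, videos=video_inputs,
        #                    padding=True, return_tensors="pt").to(model.device)
        # generated_ids = model.generate(**inputs, max_new_tokens=128)
        # trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
        # print(processor.batch_decode(trimmed, skip_special_tokens=True))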