# NOTE(review): the four lines that were here ("text", "stringlengths", "1",
# "93.6k") were dataset-viewer extraction residue, not program code; they have
# been neutralized into this comment so the module can be imported.
from qwen_vl_utils import process_vision_info
import torch
from funasr import AutoModel
import pygame
import edge_tts
import asyncio
from time import sleep
# --- Configuration ---
AUDIO_RATE = 16000 # audio sample rate in Hz (16 kHz is a webrtcvad-supported rate)
AUDIO_CHANNELS = 1 # mono capture
CHUNK = 1024 # frames per audio read from the input stream
VAD_MODE = 3 # WebRTC VAD aggressiveness, 0-3 (3 = most aggressive non-speech filtering)
OUTPUT_DIR = "./output" # directory where captured segments are written
NO_SPEECH_THRESHOLD = 1 # seconds of silence before pending speech segments are flushed
# Ensure the output directory exists
os.makedirs(OUTPUT_DIR, exist_ok=True)
# Queues used to buffer audio and video data for synchronization between threads
audio_queue = Queue()
video_queue = Queue()
# --- Shared mutable state (read/written by the recorder threads) ---
last_active_time = time.time() # timestamp of the most recently detected speech
recording_active = True # flag polled by recorder loops; clear it to stop them
segments_to_save = [] # pending (raw_audio_bytes, timestamp) speech chunks
saved_intervals = [] # intervals already persisted (bookkeeping)
last_vad_end_time = 0 # end timestamp of the last saved VAD-active segment
# Initialize the WebRTC voice-activity detector
vad = webrtcvad.Vad()
vad.set_mode(VAD_MODE)
# Audio recording thread
def audio_recorder():
    """Capture microphone audio, run VAD every ~0.5 s, and trigger segment saves.

    Loops until the module-level ``recording_active`` flag is cleared.
    Each ~0.5 s of buffered audio is passed to ``check_vad_activity``;
    speech-positive chunks are appended to ``segments_to_save`` with a
    timestamp. After ``NO_SPEECH_THRESHOLD`` seconds without speech, any
    segment newer than ``last_vad_end_time`` is flushed via
    ``save_audio_video()``. Reads/writes the shared module-level state
    declared ``global`` below.
    """
    global audio_queue, recording_active, last_active_time, segments_to_save, last_vad_end_time
    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paInt16,
                    channels=AUDIO_CHANNELS,
                    rate=AUDIO_RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    audio_buffer = []
    print("音频录制已开始")
    # try/finally so PortAudio resources are released even if stream.read()
    # or a helper raises (the original leaked the stream on any exception).
    try:
        while recording_active:
            data = stream.read(CHUNK)
            audio_buffer.append(data)
            # Run VAD once roughly every 0.5 s of buffered audio
            if len(audio_buffer) * CHUNK / AUDIO_RATE >= 0.5:
                # Join the buffered frames and check for voice activity
                raw_audio = b''.join(audio_buffer)
                vad_result = check_vad_activity(raw_audio)
                if vad_result:
                    print("检测到语音活动")
                    last_active_time = time.time()
                    segments_to_save.append((raw_audio, time.time()))
                else:
                    print("静音中...")
                audio_buffer = []  # clear the buffer for the next window
            # After a stretch of silence, decide whether to flush pending speech
            if time.time() - last_active_time > NO_SPEECH_THRESHOLD:
                # Save only if there is a speech segment newer than the last save
                if segments_to_save and segments_to_save[-1][1] > last_vad_end_time:
                    save_audio_video()
                    last_active_time = time.time()
                else:
                    pass
                    # print("无新增语音段,跳过保存")
    finally:
        stream.stop_stream()
        stream.close()
        p.terminate()
# 视频录制线程
def video_recorder():
global video_queue, recording_active
cap = cv2.VideoCapture(0) # 使用默认摄像头
print("视频录制已开始")
while recording_active:
ret, frame = cap.read()
if ret:
video_queue.put((frame, time.time()))
# 实时显示摄像头画面
cv2.imshow("Real Camera", frame)
if cv2.waitKey(1) & 0xFF == ord('q'): # 按 Q 键退出
break
else:
print("无法获取摄像头画面")