# NOTE(review): dataset-extraction artifact — the original chunk began with the
# stray metadata lines "text / stringlengths / 1 / 93.6k", which are not Python.
# Preserved here as a comment so the file remains parseable.
cap.release()
cv2.destroyAllWindows()
# --- Detect VAD (voice) activity ---
def check_vad_activity(audio_data):
    """Return True when *audio_data* contains enough speech frames.

    The buffer is scanned in fixed 20 ms frames; the global ``vad``
    classifier votes on each complete frame, and the buffer counts as
    active when more than ~80% of the full frames are speech.
    """
    frame_len = int(AUDIO_RATE * 0.02)  # bytes per 20 ms frame
    # Threshold: 80% of the number of whole frames in the buffer.
    threshold = round(0.8 * len(audio_data) // frame_len)
    speech_frames = 0
    for offset in range(0, len(audio_data), frame_len):
        frame = audio_data[offset:offset + frame_len]
        if len(frame) != frame_len:
            continue  # skip the trailing partial frame
        if vad.is_speech(frame, sample_rate=AUDIO_RATE):
            speech_frames += 1
    return speech_frames > threshold
# --- Save buffered audio and the matching video frames ---
def save_audio_video():
    """Flush ``segments_to_save`` to disk as a WAV file plus an AVI clip.

    Each buffered segment is assumed to be ``(audio_bytes, timestamp)``.
    Saving is skipped when the new segment overlaps the previously saved
    interval. On success the covered ``(start, end)`` range is appended to
    ``saved_intervals``; the segment buffer is cleared either way.

    Fix vs. original: the wave handle and the cv2.VideoWriter were leaked
    if any write raised — now closed via ``with`` / ``try/finally``.
    """
    global segments_to_save, video_queue, last_vad_end_time, saved_intervals
    if not segments_to_save:
        return

    # Time range covered by the buffered segments.
    start_time = segments_to_save[0][1]
    end_time = segments_to_save[-1][1]

    # Skip if this segment overlaps the one saved last.
    if saved_intervals and saved_intervals[-1][1] >= start_time:
        print("当前片段与之前片段重叠,跳过保存")
        segments_to_save.clear()
        return

    # --- Save audio as 16-bit PCM WAV; context manager closes the file
    # even when a write fails.
    audio_frames = [seg[0] for seg in segments_to_save]
    audio_output_path = f"{OUTPUT_DIR}/audio_0.wav"
    with wave.open(audio_output_path, 'wb') as wf:
        wf.setnchannels(AUDIO_CHANNELS)
        wf.setsampwidth(2)  # 16-bit PCM
        wf.setframerate(AUDIO_RATE)
        wf.writeframes(b''.join(audio_frames))
    print(f"音频保存至 {audio_output_path}")

    # --- Drain the video queue, keeping frames inside the audio interval.
    video_frames = []
    while not video_queue.empty():
        frame, timestamp = video_queue.get()
        if start_time <= timestamp <= end_time:
            video_frames.append(frame)

    if video_frames:
        video_output_path = f"{OUTPUT_DIR}/video_0.avi"
        # NOTE(review): frame size (640, 480) and 20 fps are hard-coded —
        # presumably matching the capture settings; confirm against the
        # capture loop elsewhere in this file.
        out = cv2.VideoWriter(video_output_path,
                              cv2.VideoWriter_fourcc(*'XVID'),
                              20.0, (640, 480))
        try:
            for frame in video_frames:
                out.write(frame)
        finally:
            out.release()  # always release the writer handle
        print(f"视频保存至 {video_output_path}")
        # Inference()
    # else: no frames fell inside the interval — audio-only save.
    # print("无可保存的视频帧")

    # Record the saved interval and reset the buffer.
    saved_intervals.append((start_time, end_time))
    segments_to_save.clear()
# --- Blocking audio playback ---
def play_audio(file_path):
    """Play the audio file at *file_path* and block until it finishes.

    Initializes the pygame mixer, polls once per second while the track
    is busy, and always shuts the mixer down afterwards.
    """
    try:
        pygame.mixer.init()
        music = pygame.mixer.music
        music.load(file_path)
        music.play()
        # Poll until playback ends.
        while music.get_busy():
            time.sleep(1)
        print("播放完成!")
    except Exception as e:
        print(f"播放失败: {e}")
    finally:
        pygame.mixer.quit()
async def amain(TEXT, VOICE, OUTPUT_FILE) -> None:
    """Synthesize *TEXT* with the edge-tts voice *VOICE* into *OUTPUT_FILE*."""
    tts = edge_tts.Communicate(TEXT, VOICE)
    await tts.save(OUTPUT_FILE)
# # -------------- Load QWen2-VL Model ------------
# # default: Load the model on the available device(s)
# model = Qwen2VLForConditionalGeneration.from_pretrained(
# "Qwen/Qwen2-VL-2B-Instruct", torch_dtype="auto", device_map="auto"
# )
# # ------- Limit input resolution to reduce VRAM usage -------
# min_pixels = 256*28*28
# max_pixels = 512*28*28