# cv2.imshow(f"Frame {frame_index}", frame)

# -------- SenseVoice inference ---------
input_file = TEMP_AUDIO_FILE
res = model_senceVoice.generate(
    input=input_file,
    cache={},
    language="auto",  # "zh", "en", "yue", "ja", "ko", "nospeech"
    use_itn=False,
)
# SenseVoice prefixes the transcript with rich tags such as <|zh|><|NEUTRAL|>;
# splitting on ">" keeps only the plain text after the last tag.
prompt = res[0]['text'].split(">")[-1]
# --------- SenseVoice --end----------
# -------- Qwen2-VL model inference ---------
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": f"{file_path}",
            },
            {"type": "text", "text": f"{prompt}"},
        ],
    }
]
# Preparation for inference
text = processor.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# process_vision_info is the qwen_vl_utils helper used in the Qwen2-VL examples
image_inputs, video_inputs = process_vision_info(messages)
inputs = processor(
    text=[text],
    images=image_inputs,
    videos=video_inputs,
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to("cuda")

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids_trimmed = [
    out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
    generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
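# `amain` and `play_audio` are defined earlier in the script. Given the
# Azure-style voice names passed below, `amain` is an edge-tts style helper;
# a minimal sketch (an assumption, not the original definition) would be:
#
#     async def amain(text: str, voice: str, output_file: str) -> None:
#         await edge_tts.Communicate(text, voice).save(output_file)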
# Input text (the model's reply)
text = output_text[0]
# asyncio.run(amain(text, "zh-CN-YunxiaNeural", os.path.join(folder_path, "sft_0.mp3")))
# play_audio(f'{folder_path}/sft_0.mp3')
asyncio.run(amain(text, "zh-CN-XiaoyiNeural", os.path.join(folder_path, "sft_0.mp3")))
play_audio(f'{folder_path}/sft_0.mp3')
# asyncio.run(amain(text, "zh-CN-YunjianNeural", os.path.join(folder_path, "sft_0.mp3")))
# play_audio(f'{folder_path}/sft_0.mp3')
# asyncio.run(amain(text, "zh-CN-shaanxi-XiaoniNeural", os.path.join(folder_path, "sft_0.mp3")))
# play_audio(f'{folder_path}/sft_0.mp3')
# <FILESEP>
"""
Computes the Underwater Image Quality Measure (UIQM)
metrics paper: https://ieeexplore.ieee.org/document/7305804
referenced from https://github.com/xahidbuffon/FUnIE-GAN/blob/master/Evaluation/uqim_utils.py
"""
from scipy import ndimage
from PIL import Image
import numpy as np
import math

def mu_a(x, alpha_L=0.1, alpha_R=0.1):
    """
    Calculates the asymmetric alpha-trimmed mean
    """
    # sort pixels by intensity - for clipping
    x = sorted(x)
    # get number of pixels
    K = len(x)
    # calculate T alpha L and T alpha R
    T_a_L = math.ceil(alpha_L * K)
    T_a_R = math.floor(alpha_R * K)
    # calculate mu_alpha weight
    weight = 1 / (K - T_a_L - T_a_R)
    # loop through flattened image starting at T_a_L+1 and ending at K-T_a_R
    s = int(T_a_L + 1)
    e = int(K - T_a_R)
    val = sum(x[s:e])
    val = weight * val
    return val
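# In the reference implementation, mu_a is applied to the flattened colour-difference
# channels when building the UICM term, e.g. mu_a((R - G).flatten()) and
# mu_a(((R + G) / 2 - B).flatten()) for a float RGB image (illustrative; R, G, B
# here stand for the image's colour planes, not names defined in this file).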
def s_a(x, mu):
    # average squared deviation from the (trimmed) mean mu
    val = 0
    for pixel in x:
        val += math.pow((pixel - mu), 2)
    return val / len(x)
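# In the reference implementation, mu_a and s_a feed the colourfulness term:
#     UICM = -0.0268 * sqrt(mu_RG**2 + mu_YB**2) + 0.1586 * sqrt(s_RG + s_YB)
# where mu_* and s_* are computed on the RG and YB channels described above.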