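import argparse
import glob
import os
import sys

from tqdm import tqdm

# Minimal reconstruction sketch of the truncated helpers, assuming the
# openai-whisper package (whisper.load_model / model.transcribe). The model
# name "large-v3" and the head of transcribe() are assumptions, not the
# original implementation.
import whisper


def load_whisper_model():
    # Assumed checkpoint; any Whisper model name can be substituted.
    return whisper.load_model("large-v3")


def transcribe(model, wav_path, initial_prompt, allow_multi_segment=False):
    raw = model.transcribe(wav_path, language="ja", initial_prompt=initial_prompt)
    # Skip clips that Whisper split into multiple segments unless allowed,
    # signalling the caller with None.
    if not allow_multi_segment and len(raw["segments"]) > 1:
        return None
    result = raw["text"]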
    result = result.strip()
    # Normalize ASCII punctuation to the full-width forms used in Japanese text;
    # Whisper often emits spaces where a Japanese comma belongs.
    result = result.replace(".", "。")
    result = result.replace(" ", "、")
    # print(f"Result: {result}")
    return result


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # The Japanese default prompt conditions Whisper toward Japanese output
    # with full-width punctuation.
    parser.add_argument(
        "--prompt",
        type=str,
        default="こんにちは。元気、ですかー?私はちゃんと元気だよ。",
    )
    parser.add_argument("--data-dir", type=str, default="data")
    args = parser.parse_args()

    initial_prompt = args.prompt
    data_dir = args.data_dir
    wavs_dir = os.path.join(data_dir, "wavs")
    transcript_path = os.path.join(data_dir, "transcript_utf8.txt")

    wav_paths = sorted(glob.glob(wavs_dir + "/**/*.wav", recursive=True))
    print(f"Number of wav files: {len(wav_paths)}")

    model = load_whisper_model()
    with open(transcript_path, "w", encoding="utf-8") as output:
        for wav_file in tqdm(wav_paths, file=sys.stdout):
            file_name = os.path.basename(wav_file)[:-4]  # strip the ".wav" suffix
            transcription = transcribe(
                model, wav_file, initial_prompt, allow_multi_segment=True
            )
            if transcription is None:
                continue
            output.write(f"{file_name}:{transcription}\n")
    print("Transcription finished. Check `data/transcript_utf8.txt` and correct it if necessary.")
    print("---")
# <FILESEP>
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import BertModel, BertPreTrainedModel


class BiEncoder(BertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.bert = kwargs["bert"]

    def forward(self, context_input_ids, context_input_masks,
                responses_input_ids, responses_input_masks, labels=None):
        temperature = 0.05
        # During training, keep only the first (positive) response per context;
        # the other contexts' positives in the batch serve as in-batch negatives.
        if labels is not None:
            responses_input_ids = responses_input_ids[:, 0, :].unsqueeze(1)
            responses_input_masks = responses_input_masks[:, 0, :].unsqueeze(1)

        # [CLS] embedding of each context, L2-normalized: [bs, dim]
        context_vec = self.bert(context_input_ids, context_input_masks)[0][:, 0, :]
        context_vec = F.normalize(context_vec, dim=1)

        batch_size, res_cnt, seq_length = responses_input_ids.shape
        responses_input_ids = responses_input_ids.view(-1, seq_length)
        responses_input_masks = responses_input_masks.view(-1, seq_length)
        # [CLS] embedding of each response: [bs * res_cnt, dim]
        responses_vec = self.bert(responses_input_ids, responses_input_masks)[0][:, 0, :]
        responses_vec = responses_vec.view(batch_size, res_cnt, -1)
        responses_vec = F.normalize(responses_vec, dim=2)

        if labels is not None:
            # Contrastive (InfoNCE-style) loss over the in-batch similarity
            # matrix, with matching context-response pairs on the diagonal.
            responses_vec = responses_vec.squeeze(1)
            dot_product = torch.matmul(context_vec, responses_vec.t()) / temperature  # [bs, bs]
            mask = torch.eye(context_input_ids.size(0)).to(context_input_ids.device)
            loss = F.log_softmax(dot_product, dim=-1) * mask
            loss = (-loss.sum(dim=1)).mean()
            return loss
        else:
            # Inference: score every candidate response against its context.
            context_vec = context_vec.unsqueeze(1)  # [bs, 1, dim]
            dot_product = torch.matmul(
                context_vec, responses_vec.permute(0, 2, 1)
            ).squeeze()  # [bs, res_cnt]
            return dot_product


class CrossEncoder(BertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.bert = kwargs["bert"]
        self.linear = nn.Linear(config.hidden_size, 1)

    def forward(self, text_input_ids, text_input_masks, text_input_segments, labels=None):
        batch_size, neg, seq_length = text_input_ids.shape
        # Flatten the candidate dimension so BERT sees one pair per row.
        text_input_ids = text_input_ids.reshape(-1, seq_length)
        text_input_masks = text_input_masks.reshape(-1, seq_length)
        text_input_segments = text_input_segments.reshape(-1, seq_length)
        # [CLS] embedding of each concatenated context-response pair: [bs * neg, dim]
        text_vec = self.bert(text_input_ids, text_input_masks, text_input_segments)[0][:, 0, :]
        score = self.linear(text_vec)
        score = score.view(-1, neg)
        if labels is not None:
            # The positive candidate is assumed to sit at index 0.
            loss = -F.log_softmax(score, -1)[:, 0].mean()
            return loss
        # Inference: return the per-candidate scores.
        return score
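

# A minimal smoke test, not part of the original file: it wires both encoders
# to a tiny randomly initialized BertModel. All config sizes and tensor shapes
# below are illustrative assumptions.
if __name__ == "__main__":
    from transformers import BertConfig

    config = BertConfig(
        vocab_size=100,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=64,
    )
    bert = BertModel(config)
    bs, n_cand, seq = 4, 3, 16

    ctx_ids = torch.randint(0, config.vocab_size, (bs, seq))
    ctx_mask = torch.ones(bs, seq, dtype=torch.long)
    res_ids = torch.randint(0, config.vocab_size, (bs, n_cand, seq))
    res_mask = torch.ones(bs, n_cand, seq, dtype=torch.long)

    bi = BiEncoder(config, bert=bert)
    loss = bi(ctx_ids, ctx_mask, res_ids, res_mask, labels=torch.zeros(bs))
    scores = bi(ctx_ids, ctx_mask, res_ids, res_mask)
    print(f"BiEncoder: loss={loss.item():.4f}, scores shape={tuple(scores.shape)}")

    cross = CrossEncoder(config, bert=bert)
    seg = torch.zeros(bs, n_cand, seq, dtype=torch.long)
    c_loss = cross(res_ids, res_mask, seg, labels=torch.zeros(bs))
    c_scores = cross(res_ids, res_mask, seg)
    print(f"CrossEncoder: loss={c_loss.item():.4f}, scores shape={tuple(c_scores.shape)}")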