# (extraction residue: dataset-viewer column header "text / stringlengths / 1 / 93.6k")
# Harvest positive (hypernymy) and negative (coordinate/synonym) pairs from
# the four benchmark datasets, keeping only in-vocabulary words.
for dataset in ('K&H+N', 'BLESS', 'ROOT09', 'EVALution'):
    for part in ('train', 'val', 'test'):
        with open(os.path.join(dataset, part + '.tsv')) as tsv_file:
            for record in csv.reader(tsv_file, delimiter='\t', quoting=csv.QUOTE_NONE):
                child, parent, rel = record[0], record[1], record[2]
                # Skip pairs with out-of-vocabulary words.
                if child not in w2v or parent not in w2v:
                    continue
                # Positive relation labels per dataset: (K&H+N, BLESS, ROOT09, EVALution).
                if rel in ('hypo', 'hyper', 'HYPER', 'IsA'):
                    if parent not in positives_trusted[child]:
                        positives_trusted[child].append(parent)
                elif rel in ('coord', 'Synonym'):
                    # Negatives are symmetric, so record both directions.
                    if parent not in negatives[child]:
                        negatives[child].append(parent)
                    if child not in negatives[parent]:
                        negatives[parent].append(child)
# "Untrusted" hypernymy candidates read from an external pair list
# (hyponym \t hypernym \t frequency); only pairs whose both words are in the
# w2v vocabulary are kept.  Fixes: defaultdict(lambda: list()) replaced with
# the idiomatic defaultdict(list); the unused `frequency` local removed.
positives_untrusted = defaultdict(list)  # hyponym -> [hypernym, ...]
with open('en_ps59g-rnk3-min100-nomwe-39k.csv') as f:
    reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
    for row in reader:
        hyponym, hypernym = row[0], row[1]  # row[2] (frequency) is unused
        if hyponym in w2v and hypernym in w2v and hypernym not in positives_untrusted[hyponym]:
            positives_untrusted[hyponym].append(hypernym)
# Hyponyms that actually have at least one trusted hypernym attached.
keys_trusted = [key for key, hypers in positives_trusted.items() if hypers]

# 60% train / 20% validation / 20% test split over the trusted hyponyms.
trusted_train, trusted_validation_test = train_test_split(np.arange(len(keys_trusted), dtype='int32'), test_size=.4,
                                                          random_state=RANDOM_SEED)
trusted_validation, trusted_test = train_test_split(trusted_validation_test, test_size=.5, random_state=RANDOM_SEED)


def _split_mapping(index_subset):
    # Map split indices back to {hyponym: trusted-hypernym list}.  Note the
    # values are the *same* list objects stored in positives_trusted.
    return {keys_trusted[i]: positives_trusted[keys_trusted[i]] for i in index_subset}


hypernyms_train = _split_mapping(trusted_train)

# Augment the training split (only) with untrusted candidate hypernyms.
for hyponym, candidates in positives_untrusted.items():
    trusted = hypernyms_train.get(hyponym)
    if trusted is None:
        continue
    for hypernym in candidates:
        if hypernym not in trusted:
            trusted.append(hypernym)

hypernyms_validation = _split_mapping(trusted_validation)
hypernyms_test = _split_mapping(trusted_test)


def _flatten(mapping):
    # Flatten {hyponym: [hypernym, ...]} into (hyponym, hypernym) pairs.
    return [(x, y) for x, ys in mapping.items() for y in ys]


subsumptions_train = _flatten(hypernyms_train)
subsumptions_validation = _flatten(hypernyms_validation)
subsumptions_test = _flatten(hypernyms_test)
def write_subsumptions(subsumptions, filename):
    """Write (hyponym, hypernym) pairs to *filename* as tab-separated rows."""
    with open(filename, 'w', newline='') as out:
        tsv = csv.writer(out, dialect='excel-tab', lineterminator='\n')
        tsv.writerows(subsumptions)
# Persist the three subsumption splits and the synonym/coordinate negatives.
for pairs, path in (
    (subsumptions_train, 'subsumptions-train.txt'),
    (subsumptions_validation, 'subsumptions-validation.txt'),
    (subsumptions_test, 'subsumptions-test.txt'),
):
    write_subsumptions(pairs, path)

with open('synonyms.txt', 'w', newline='') as f:
    writer = csv.writer(f, dialect='excel-tab', lineterminator='\n')
    writer.writerows((word, ','.join(words)) for word, words in negatives.items())
# <FILESEP>
"""
Description:
Notes:
Requirements:
pip install python-multipart
"""
import json
from contextlib import asynccontextmanager
from typing import Optional

from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse
from loguru import logger

from asr.paraformer import ALIASR
# Global ASR model instance; None until the lifespan handler loads it.
# Fixed annotation: the value is None before startup, so the type must be
# Optional[ALIASR] rather than ALIASR.
auto_asr: Optional[ALIASR] = None


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Load the ASR model before serving and release it on shutdown.

    FastAPI runs the code before ``yield`` at startup and the code after
    ``yield`` at shutdown.
    """
    global auto_asr
    auto_asr = ALIASR()
    yield
    # Drop the model reference on shutdown (the original used a bare
    # """...""" string here, which is a no-op statement, not a comment).
    auto_asr = None
app = FastAPI(lifespan=lifespan)


# Root endpoint: simple welcome/liveness message.
@app.get("/")
async def read_root():
    # Bug fix: returning json.dumps(...) made FastAPI serialize the already-
    # encoded string again, so clients received a quoted JSON string instead
    # of an object.  Return a JSONResponse (imported above for this purpose).
    return JSONResponse(content={"code": 0, "msg": "欢迎访问ASR", "data": ""})