# retrieve_long_doc.py
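"""Filter a JSONL corpus for long documents.

Reads records from `source_file`, tokenizes each one with the tokenizer at
`tokenizer_path`, and writes every document longer than `min_len` tokens to
`out_file` as {"text": ...} lines, stopping after `retri_num` matches.
"""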
import json
from typing import Any

import numpy as np
from transformers import AutoTokenizer


def convert_data_to_id(tokenizer: AutoTokenizer, data: Any):
    # Tokenize the document and return its token ids as an int32 array.
    input_ids = tokenizer.encode(data)
    return np.array(input_ids, dtype=np.int32)


def get_tokenizer(tokenizer_path):
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_path, use_fast=True, trust_remote_code=False
    )
    return tokenizer


# Config
source_file = "../redstone_v4_23_json/mix_splits/mixed_redstone_part_20.jsonl"
out_file = "256k_docs_for_test_qwen.jsonl"
tokenizer_path = "../Qwen2.5-1.5B"
min_len = 256 * 1024  # minimum document length in tokens (256k)
retri_num = 1000  # stop after retrieving this many long documents

tokenizer = get_tokenizer(tokenizer_path)

idx = 0
succ_cnt = 0
out_f = open(out_file, 'w')
with open(source_file) as f:
    for line in f:
        idx += 1
        if idx % 10000 == 0:
            print('Cur idx - ', idx)
        line = json.loads(line)
        # Pull the raw text out of whichever field this record uses.
        if 'text' in line:
            temp = line['text']
        elif 'raw_content_lines' in line:
            temp = "\n".join(line['raw_content_lines'])
        else:
            print("error: record has neither 'text' nor 'raw_content_lines'")
            exit()
        try:
            token_id = convert_data_to_id(tokenizer, temp)
        except UnicodeDecodeError:
            print('Error line - encoding: ', idx)
            continue  # skip undecodable records rather than reusing a stale token_id
        if len(token_id) > min_len:
            temp_dic = {'text': temp}
            out_f.write(json.dumps(temp_dic) + "\n")
            succ_cnt += 1
            if succ_cnt % 10 == 0:
                print("succ_cnt:", succ_cnt)
            if succ_cnt == retri_num:  # use the configured target, not a hard-coded 1000
                break
out_f.close()
print(f"retrieved {succ_cnt} docs longer than {min_len} tokens from {idx} docs.")