import json
import sys
from typing import Any

import numpy as np
from transformers import AutoTokenizer


def convert_data_to_id(tokenizer: AutoTokenizer, data: Any) -> np.ndarray:
    """Tokenize a text string and return its token ids as an int32 array."""
    input_ids = tokenizer.encode(data)
    return np.array(input_ids, dtype=np.int32)


def get_tokenizer(tokenizer_path: str) -> AutoTokenizer:
    """Load a tokenizer from a local path or model name."""
    return AutoTokenizer.from_pretrained(
        tokenizer_path, use_fast=True, trust_remote_code=False
    )

# Config
source_file = "../redstone_v4_23_json/mix_splits/mixed_redstone_part_20.jsonl"
out_file = "256k_docs_for_test_qwen.jsonl"
tokenizer_path = "../Qwen2.5-1.5B"
min_len = 256 * 1024   # minimum document length in tokens (256k)
retri_num = 1000       # stop after collecting this many documents

tokenizer = get_tokenizer(tokenizer_path)
idx = 0
succ_cnt = 0
out_f = open(out_file, 'w')

with open(source_file) as f:
    for line in f:
        idx += 1
        if idx % 10000 == 0:
            print('Cur idx -', idx)
        record = json.loads(line)

        # Pull the raw text out of the record; the source mixes two schemas.
        if 'text' in record:
            text = record['text']
        elif 'raw_content_lines' in record:
            text = "\n".join(record['raw_content_lines'])
        else:
            sys.exit(f"Unexpected record format at line {idx}")

        # Tokenize; skip lines that fail to encode rather than reusing a stale result.
        try:
            token_ids = convert_data_to_id(tokenizer, text)
        except UnicodeDecodeError:
            print('Error line - encoding:', idx)
            continue

        # Keep only documents longer than min_len tokens.
        if len(token_ids) > min_len:
            out_f.write(json.dumps({'text': text}) + "\n")
            succ_cnt += 1
            if succ_cnt % 10 == 0:
                print("succ_cnt:", succ_cnt)
            if succ_cnt == retri_num:
                break

out_f.close()
print(f"Retrieved {succ_cnt} docs longer than {min_len} tokens out of {idx} docs.")