"""Tokenize a JSONL corpus and re-chunk it into fixed-length text segments.

Each input line must be a JSON object with either a 'text' field or a
'raw_content_lines' field. The text is tokenized, buffered, and re-emitted as
JSONL records whose 'text' field is decoded from a window of max_length tokens.
"""
import argparse
import json
import os
import sys
from typing import Any

import numpy as np
from transformers import AutoTokenizer

def parse_args():
    parser = argparse.ArgumentParser(
        description="Tokenize a JSONL corpus and re-chunk it into fixed-length text segments"
    )
    parser.add_argument(
        "--source_file",
        type=str,
        required=True,
        help="Input JSONL file with a 'text' or 'raw_content_lines' field per line",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Number of tokens per output chunk",
    )
    parser.add_argument(
        "--chunk_size",
        type=int,
        default=1024,
        help="Flush the token buffer once it holds more than max_length * chunk_size tokens",
    )
    parser.add_argument(
        "--tokenizer_path",
        type=str,
        required=True,
        help="Path or model id passed to AutoTokenizer.from_pretrained",
    )
    args = parser.parse_args()
    return args


def get_tokenizer(tokenizer_path):
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_path, use_fast=True, trust_remote_code=False
    )
    # special_tokens_dict = {'additional_special_tokens': ['<pad>']}
    # tokenizer.add_special_tokens(special_tokens_dict)
    return tokenizer


def convert_data_to_id(tokenizer: AutoTokenizer, data: Any) -> np.ndarray:
    # Encode raw text into token ids and return them as an int32 array.
    input_ids = tokenizer.encode(data)
    return np.array(input_ids, dtype=np.int32)

args = parse_args()

tokenizer = get_tokenizer(args.tokenizer_path)
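# Read the source JSONL line by line; its base name is reused for the output file name.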
infile = open(args.source_file, 'r', encoding='utf-8')
file_name, _ = os.path.splitext(os.path.basename(args.source_file))

print("source file - ", args.source_file)
print('############ Start data reading ###########')

idx = 0
max_length = args.max_length
chunk_size = args.chunk_size

# Rolling buffer of token ids accumulated across input lines.
token_ids = np.array([], dtype=np.int32)

with open(f"{file_name}_streaming_{max_length}.jsonl", 'w') as f:
    for line in infile:
        idx += 1
        if idx % 10000 == 0:
            print('Cur idx - ', idx)
        try:
            line = json.loads(line)
            if 'text' in line:
                temp = line['text'] + "\n"
            elif 'raw_content_lines' in line:
                temp = "\n".join(line['raw_content_lines']) + "\n"
            else:
                print("error: line has neither 'text' nor 'raw_content_lines' - ", idx)
                sys.exit(1)
            try:
                token_id = convert_data_to_id(tokenizer, temp)
                token_ids = np.concatenate((token_ids, token_id), dtype=np.int32)
            except UnicodeDecodeError:
                print('Error line - encoding: ', idx)

            # Once the buffer holds more than max_length * chunk_size tokens,
            # drain it into fixed-length chunks of max_length tokens each and
            # write each decoded chunk as one JSONL record.
            if len(token_ids) > max_length * chunk_size:
                while len(token_ids) > max_length:
                    try:
                        temp_text = tokenizer.decode(token_ids[:max_length])
                        f.write(json.dumps({'text': temp_text}) + "\n")
                        token_ids = token_ids[max_length:]
                    except UnicodeDecodeError:
                        print('Error line - decoding: ', idx)
                        token_ids = token_ids[max_length:]
                
        except Exception:
            # Skip malformed lines (e.g. invalid JSON) but keep processing.
            print("error source file - ", args.source_file)
            print('Error line: ', idx)
            continue
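
    # Drain any complete chunks still buffered at the end of the file; a trailing
    # partial chunk (fewer than max_length tokens) is dropped, mirroring the loop above.
    while len(token_ids) > max_length:
        temp_text = tokenizer.decode(token_ids[:max_length])
        f.write(json.dumps({'text': temp_text}) + "\n")
        token_ids = token_ids[max_length:]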

infile.close()
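
# Example invocation (script and file names are illustrative):
#   python chunk_streaming.py --source_file corpus.jsonl --tokenizer_path ./tokenizer \
#       --max_length 512 --chunk_size 1024
# Each output line is a JSON object of the form {"text": "<text decoded from max_length tokens>"}.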