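# Repack a JSONL text corpus into fixed-length chunks for causal language modeling.
# Each record's 'text' field (or joined 'raw_content_lines') is tokenized, the token
# ids are concatenated into one running stream, and the stream is re-decoded into
# chunks of --max_length tokens written to <source_basename>_streaming_<max_length>.jsonl.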
import argparse
import json
import os
import sys
from typing import Any

import numpy as np
from transformers import AutoTokenizer


def parse_args():
    parser = argparse.ArgumentParser(
        description="Tokenize a JSONL corpus and repack it into fixed-length chunks for causal language modeling"
    )
    parser.add_argument(
        "--source_file",
        type=str,
        required=True,
        help="Input .jsonl file with a 'text' or 'raw_content_lines' field per record",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Number of tokens per output chunk",
    )
    parser.add_argument(
        "--chunk_size",
        type=int,
        default=1024,
        help="Drain the token buffer once it holds more than max_length * chunk_size tokens",
    )
    parser.add_argument(
        "--tokenizer_path",
        type=str,
        required=True,
        help="Path or model id passed to AutoTokenizer.from_pretrained",
    )
    return parser.parse_args()


def get_tokenizer(tokenizer_path):
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_path, use_fast=True, trust_remote_code=False
    )
    return tokenizer


def convert_data_to_id(tokenizer: AutoTokenizer, data: Any):
    # Encode the text and return its token ids as an int32 numpy array.
    input_ids = tokenizer.encode(data)
    return np.array(input_ids, dtype=np.int32)


args = parse_args()

tokenizer = get_tokenizer(args.tokenizer_path)
file_name, _ = os.path.splitext(os.path.basename(args.source_file))

print("source file - ", args.source_file)
print('############ Start data reading ###########')

idx = 0
max_length = args.max_length
chunk_size = args.chunk_size

# Token ids from consecutive records are appended to one running buffer and
# periodically re-decoded into fixed-length chunks of max_length tokens.
token_ids = np.array([], dtype=np.int32)

with open(args.source_file, 'r', encoding='utf-8') as infile, \
        open(f"{file_name}_streaming_{max_length}.jsonl", 'w') as f:
    for line in infile:
        idx += 1
        if idx % 10000 == 0:
            print('Cur idx - ', idx)
        try:
            record = json.loads(line)
            if 'text' in record:
                temp = record['text'] + "\n"
            elif 'raw_content_lines' in record:
                temp = "\n".join(record['raw_content_lines']) + "\n"
            else:
                sys.exit(f"Unrecognized record at line {idx}: expected a 'text' or 'raw_content_lines' field")

            try:
                token_id = convert_data_to_id(tokenizer, temp)
                token_ids = np.concatenate((token_ids, token_id), dtype=np.int32)
            except UnicodeDecodeError:
                print('Error line - encoding: ', idx)

            # Once the buffer exceeds max_length * chunk_size tokens, drain it in
            # max_length-sized chunks, keeping the remainder for the next records.
            if len(token_ids) > max_length * chunk_size:
                while len(token_ids) > max_length:
                    try:
                        temp_text = tokenizer.decode(token_ids[:max_length])
                        f.write(json.dumps({'text': temp_text}) + "\n")
                        token_ids = token_ids[max_length:]
                    except UnicodeDecodeError:
                        print('Error line - decoding: ', idx)
                        token_ids = token_ids[max_length:]
        except Exception:
            print("error source file - ", args.source_file)
            print('Error line: ', idx)
            continue

    # Drain any full-length chunks still buffered after the last record.
    while len(token_ids) > max_length:
        temp_text = tokenizer.decode(token_ids[:max_length])
        f.write(json.dumps({'text': temp_text}) + "\n")
        token_ids = token_ids[max_length:]
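# Example invocation (the script name, file names, and tokenizer id below are
# illustrative, not taken from the source):
#   python repack_streaming.py --source_file corpus.jsonl --tokenizer_path gpt2 \
#       --max_length 512 --chunk_size 1024
# This writes corpus_streaming_512.jsonl to the current working directory.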