# focus-data / preprocess_longtext_streaming_step2.py
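"""Step 2 of long-text streaming preprocessing.

Splits a JSONL source file into consecutive chunks of --chunk_size lines,
then writes the chunks out --batch_size at a time with their lines
interleaved, so that a consumer reading the output in batches of
--batch_size lines receives one line from each chunk per batch, in order.
"""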
import argparse
import json
import os
def parse_args():
    parser = argparse.ArgumentParser(
        description="Interleave fixed-size chunks of a JSONL file into batches for streaming training"
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=512,
        help="number of chunks interleaved per output batch",
    )
    parser.add_argument(
        "--source_file",
        type=str,
        required=True,
        help="input JSONL file (one JSON object per line)",
    )
    parser.add_argument(
        "--chunk_size",
        type=int,
        default=512,
        help="number of consecutive lines that form one chunk",
    )
    return parser.parse_args()
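
# Example invocation (file name is illustrative):
#   python preprocess_longtext_streaming_step2.py \
#       --source_file corpus.jsonl --chunk_size 512 --batch_size 512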
args = parse_args()
print("args.source_file", args.source_file)

# Read the whole source file; each line is one JSON record.
with open(args.source_file, encoding="utf-8") as f:
    data = f.readlines()

base_name = os.path.basename(args.source_file)
file_name, _ = os.path.splitext(base_name)
bs = args.batch_size
chunk_size = args.chunk_size

print("############ Start data reading ###########")

# Group consecutive lines into chunks of `chunk_size` parsed records each.
local_cnt = 0
temp_dic_list = []
dic_list = []
for line in data:
    temp_dic_list.append(json.loads(line))
    local_cnt += 1
    if local_cnt == chunk_size:
        dic_list.append(temp_dic_list)
        temp_dic_list = []
        local_cnt = 0
# Trailing lines that do not fill a complete chunk are dropped.
print("len(dic_list)",len(dic_list))
with open(file_name+'_bs_'+str(bs)+'.jsonl', 'w') as f:
for idx in range(0, len(dic_list)-bs, bs):
for line_i in range(len(dic_list[0])):
for i in range(bs):
f.write(json.dumps(dic_list[idx+i][line_i]) + "\n")
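
# Illustration (hypothetical tiny settings): with chunk_size=2 and batch_size=2,
# chunks A=[a0, a1] and B=[b0, b1] are written as a0, b0, a1, b1. A reader
# pulling batches of 2 lines then sees (a0, b0) followed by (a1, b1), i.e.
# each stream slot advances through the same chunk across batches.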