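"""Reorder a JSONL file into batch-interleaved groups of chunks.

The input is split into consecutive chunks of --chunk_size lines; each group
of --batch_size chunks is then written out interleaved (line 0 of every chunk
in the group, then line 1, and so on) to <input name>_bs_<batch_size>.jsonl.
"""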
import json
import argparse
import os


def parse_args():
    parser = argparse.ArgumentParser(
        description="Reorder a JSONL file into batch-interleaved chunks"
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=512,
        help="Number of chunks to interleave per output group",
    )
    parser.add_argument(
        "--source_file",
        type=str,
        required=True,
        help="Path to the input JSONL file (one JSON object per line)",
    )
    parser.add_argument(
        "--chunk_size",
        type=int,
        default=512,
        help="Number of consecutive input lines per chunk",
    )
    return parser.parse_args()
args = parse_args()
print("args.source_file", args.source_file)

with open(args.source_file) as f:
    data = f.readlines()

base_name = os.path.basename(args.source_file)
file_name, _ = os.path.splitext(base_name)
bs = args.batch_size
chunk_size = args.chunk_size

print("############ Start data reading ###########")
local_cnt = 0
temp_dic_list = []  # chunk currently being filled
dic_list = []       # list of completed chunks
for idx, line in enumerate(data):
    temp_dic_list.append(json.loads(line))
    local_cnt += 1
    if local_cnt == chunk_size:
        # Chunk is full: store it and start a new one. Trailing lines that
        # do not fill a complete chunk are dropped.
        local_cnt = 0
        dic_list.append(temp_dic_list)
        temp_dic_list = []

print("len(dic_list)", len(dic_list))
out_name = f"{file_name}_bs_{bs}.jsonl"
with open(out_name, "w") as f:
    # Walk over complete groups of bs chunks (+1 so the final full group
    # is not skipped), interleaving line i of every chunk in the group.
    for idx in range(0, len(dic_list) - bs + 1, bs):
        for line_i in range(chunk_size):
            for i in range(bs):
                f.write(json.dumps(dic_list[idx + i][line_i]) + "\n")
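# Example invocation (the script and data file names here are hypothetical;
# the input must be JSONL, one JSON object per line):
#
#   python interleave_batches.py --source_file train.jsonl \
#       --batch_size 512 --chunk_size 512
#
# With 1,024 chunks of 512 lines each, this writes train_bs_512.jsonl
# containing two interleaved groups of 512 chunks.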