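    # Each VCR dataset is visited twice, once per VCR sub-setting:
    # "qa" (question -> answer) and "qar" (question + answer -> rationale).
    # A hypothetical config entry this loop consumes might look like:
    #   {"name": "vcr", "db": [...], "img": [...],
    #    "tasks": ["mlm", "mrfr", "mrc"], "mix_ratio": [9, 3, 3]}
    # (field values are illustrative, not from the source).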
    for dset in datasets:
        for vcr_task in ["qa", "qar"]:
            if is_train:
                assert len(dset['db']) == len(dset['img'])
                assert len(dset['tasks']) == len(dset['mix_ratio'])
                img_db, img_db_gt = [], []
                for img_path in dset['img']:
                    curr_img_db, curr_img_db_gt = load_img_feat(
                        img_path, all_img_dbs, opts)
                    img_db.append(curr_img_db)
                    img_db_gt.append(curr_img_db_gt)
            else:
                assert len(dset['db']) == len(dset['img']) == 1
                img_db, img_db_gt = load_img_feat(
                    dset['img'][0], all_img_dbs, opts)
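            # One text LMDB per (dataset, VCR setting). Training truncates
            # text to opts.max_txt_len; validation passes -1, which appears
            # to keep full-length text (an assumption about VcrTxtTokLmdb,
            # not stated in this snippet).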
            for i, t in enumerate(dset['tasks']):
                task = f'{t}_{dset["name"]}'

                if is_train:
                    LOGGER.info(
                        f"Loading {task} train dataset with vcr_{vcr_task}, "
                        f"{dset['db']}, {[img.img_dir for img in img_db]}, "
                        f"{[img.img_dir for img in img_db_gt]}")
                    txt_db = [VcrTxtTokLmdb(path, opts.max_txt_len,
                                            task=vcr_task)
                              for path in dset['db']]
                else:
                    LOGGER.info(
                        f"Loading {task} val dataset with vcr_{vcr_task}, "
                        f"{dset['db']}, {img_db.img_dir}, "
                        f"{img_db_gt.img_dir}")
                    txt_db = VcrTxtTokLmdb(dset['db'][0], -1,
                                           task=vcr_task)
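                # Dispatch on the task-name prefix to one of the three
                # pretraining objectives: masked language modeling (mlm),
                # masked region feature regression (mrfr), and masked
                # region classification (mrc).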
                if task.startswith('mlm'):
                    dataset = build_mlm_dataset(
                        txt_db, img_db_gt, img_db, is_train, opts)
                elif task.startswith('mrfr'):
                    dataset = build_mrfr_dataset(
                        txt_db, img_db_gt, img_db, is_train, opts)
                elif task.startswith('mrc'):
                    dataset = build_mrc_dataset(
                        txt_db, img_db_gt, img_db, is_train, opts)
                else:
                    raise ValueError(f'Undefined task {task}')
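                # Each Horovod worker holds a shard of the data, so the
                # global sample count is the local count times hvd.size().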
LOGGER.info(f"{len(dataset[0])*hvd.size()} samples loaded")
|
loader = build_dataloader(*dataset, is_train, opts)
|
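                # Training loaders are stored with their sampling ratio for
                # MetaLoader to mix; eval loaders are wrapped for prefetching.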
                if is_train:
                    ratio = dset['mix_ratio'][i]
                    dataloaders[task] = (loader, ratio)
                else:
                    dataloaders[task] = PrefetchLoader(loader)
    return dataloaders, all_img_dbs
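
# One process per GPU under Horovod: hvd.local_rank() selects the local
# device, while hvd.rank()/hvd.size() give the global worker id and world
# size used for logging and data sharding.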
def main(opts):
    hvd.init()
    n_gpu = hvd.size()
    device = torch.device("cuda", hvd.local_rank())
    torch.cuda.set_device(hvd.local_rank())
    rank = hvd.rank()
    opts.rank = rank
    LOGGER.info("device: {} n_gpu: {}, rank: {}, "
                "16-bits training: {}".format(
                    device, n_gpu, hvd.rank(), opts.fp16))

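    # Sanity-check gradient accumulation. With accumulation, the effective
    # batch size is (per-GPU batch size) * accum_steps * n_gpu -- the usual
    # relationship, stated here as an assumption about the batch-size opts
    # rather than something this snippet defines.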
    if opts.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, "
                         "should be >= 1".format(
                             opts.gradient_accumulation_steps))

    set_random_seed(opts.seed)

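    # Only rank 0 writes logs, TensorBoard events, and checkpoints; every
    # other rank gets NoOp stand-ins so the same code path runs everywhere.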
    if rank == 0:
        save_training_meta(opts)
        TB_LOGGER.create(join(opts.output_dir, 'log'))
        pbar = tqdm(total=opts.num_train_steps)
        model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))
        add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))
    else:
        LOGGER.disabled = True
        pbar = NoOp()
        model_saver = NoOp()

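    # All text LMDBs (train and val) must have been built with the same
    # BERT tokenizer; each db records it in its meta.json.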
    all_dbs = [db for datasets in [opts.train_datasets, opts.val_datasets]
               for dset in datasets for db in dset['db']]

    tokenizer = json.load(open(f'{all_dbs[0]}/meta.json'))['bert']
    assert all(tokenizer == json.load(open(f'{db}/meta.json'))['bert']
               for db in all_dbs)

    # build data loaders
    train_dataloaders, all_img_dbs = create_dataloaders(
        opts.train_datasets, True, opts)
    val_dataloaders, _ = create_dataloaders(
        opts.val_datasets, False, opts, all_img_dbs)
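    # MetaLoader interleaves the task-specific loaders, sampling tasks in
    # proportion to their mix ratios (per the UNITER pretraining setup).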
    meta_loader = MetaLoader(train_dataloaders,
                             accum_steps=opts.gradient_accumulation_steps,