text stringlengths 1 93.6k |
|---|
from apex import amp
from horovod import torch as hvd
from torch.utils.data import DataLoader
from tqdm import tqdm

from data import (TokenBucketSampler,
                  MetaLoader, PrefetchLoader, DetectFeatLmdb,
                  VcrTxtTokLmdb, ImageLmdbGroup, ConcatDatasetWithLens,
                  MlmDatasetForVCR, mlm_collate_for_vcr,
                  MrfrDatasetForVCR, mrfr_collate_for_vcr,
                  MrcDatasetForVCR, mrc_collate_for_vcr)
from model.pretrain_vcr import UniterForPretrainingForVCR
from optim import get_lr_sched
from optim.misc import build_optimizer
from utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file
from utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,
                               broadcast_tensors)
from utils.save import ModelSaver, save_training_meta
from utils.misc import NoOp, parse_with_config, set_dropout, set_random_seed
from utils.const import IMG_DIM, IMG_LABEL_DIM, BUCKET_SIZE
|
# Number of special tokens reserved for VCR — presumably the person/object
# tags added to the tokenizer vocabulary; not referenced in this chunk,
# so likely consumed by embedding resizing elsewhere. TODO confirm usage.
NUM_SPECIAL_TOKENS = 81
|
def build_dataloader(dataset, collate_fn, is_train, opts):
    """Wrap *dataset* in a token-bucketed ``DataLoader``.

    Training loaders use ``opts.train_batch_size`` and drop the last
    incomplete bucket; validation loaders use ``opts.val_batch_size``
    and keep it. Buckets are built from ``dataset.lens`` with the
    global ``BUCKET_SIZE``.
    """
    batch_size = opts.train_batch_size if is_train else opts.val_batch_size
    bucket_sampler = TokenBucketSampler(
        dataset.lens, bucket_size=BUCKET_SIZE,
        batch_size=batch_size, droplast=is_train)
    return DataLoader(dataset,
                      batch_sampler=bucket_sampler,
                      num_workers=opts.n_workers,
                      pin_memory=opts.pin_mem,
                      collate_fn=collate_fn)
|
def build_mlm_dataset(txt_db, img_db_gt, img_db, is_train, opts):
    """Build the masked-language-modeling dataset for VCR.

    For training, ``txt_db``/``img_db_gt``/``img_db`` are parallel
    sequences of per-split databases that are zipped into one
    ``ConcatDatasetWithLens``; for validation they are single databases.

    Returns ``(dataset, collate_fn)``.  ``opts`` is accepted for
    signature parity with the other ``build_*_dataset`` helpers but is
    unused here.
    """
    if is_train:
        datasets = [MlmDatasetForVCR(t, i_gt, i)
                    for t, i_gt, i in zip(txt_db, img_db_gt, img_db)]
        dataset = ConcatDatasetWithLens(datasets)
    else:
        dataset = MlmDatasetForVCR(txt_db, img_db_gt, img_db)
    # The collate fn is branch-independent (it was previously assigned
    # redundantly in both branches); return it directly, matching the
    # mrfr/mrc builders.
    return dataset, mlm_collate_for_vcr
|
def build_mrfr_dataset(txt_db, img_db_gt, img_db, is_train, opts):
    """Build the masked-region-feature-regression dataset for VCR.

    Training inputs are parallel sequences of databases, concatenated
    into one dataset; validation inputs are single databases.  The mask
    probability comes from ``opts.mrm_prob``.

    Returns ``(dataset, collate_fn)``.
    """
    if not is_train:
        dataset = MrfrDatasetForVCR(opts.mrm_prob, txt_db, img_db_gt, img_db)
    else:
        parts = [MrfrDatasetForVCR(opts.mrm_prob, t, gt, i)
                 for t, gt, i in zip(txt_db, img_db_gt, img_db)]
        dataset = ConcatDatasetWithLens(parts)
    return dataset, mrfr_collate_for_vcr
|
def build_mrc_dataset(txt_db, img_db_gt, img_db, is_train, opts):
    """Build the masked-region-classification dataset for VCR.

    Mirrors :func:`build_mrfr_dataset`: concatenated per-split datasets
    for training, a single dataset for validation, masking with
    ``opts.mrm_prob``.

    Returns ``(dataset, collate_fn)``.
    """
    if not is_train:
        dataset = MrcDatasetForVCR(opts.mrm_prob, txt_db, img_db_gt, img_db)
    else:
        parts = [MrcDatasetForVCR(opts.mrm_prob, t, gt, i)
                 for t, gt, i in zip(txt_db, img_db_gt, img_db)]
        dataset = ConcatDatasetWithLens(parts)
    return dataset, mrc_collate_for_vcr
|
def load_img_feat(db_list, all_img_dbs, opts):
    """Open the image-feature LMDB(s) named in *db_list*.

    *db_list* is a ``;``-separated string of at most two paths: one
    ground-truth db (any path containing ``"gt"``) and/or one regular
    db.  The gt db is opened directly and cached in
    ``all_img_dbs.path2imgdb``; the regular db is obtained through the
    ``all_img_dbs`` group and likewise registered there.

    Returns ``(img_db, img_db_gt)``; either may be ``None`` when the
    corresponding path is absent from *db_list*.
    """
    paths = db_list.split(";")
    assert len(paths) <= 2, "More than two img_dbs found"
    gt_db_path, db_path = "", ""
    for p in paths:
        if "gt" in p:
            gt_db_path = p
        else:
            db_path = p
    if gt_db_path != "":
        # gt boxes bypass the confidence threshold (-1) and allow up to
        # 100 boxes regardless of opts.num_bb.
        img_db_gt = DetectFeatLmdb(
            gt_db_path, -1, opts.max_bb, opts.min_bb, 100,
            opts.compressed_db)
        all_img_dbs.path2imgdb[gt_db_path] = img_db_gt
    else:
        img_db_gt = None
    if db_path != "":
        img_db = all_img_dbs[db_path]
        all_img_dbs.path2imgdb[db_path] = img_db
    else:
        # Fix: previously an empty-path entry ("" -> None) leaked into
        # the path2imgdb cache when no regular db was listed.
        img_db = None
    return img_db, img_db_gt
|
def create_dataloaders(datasets, is_train, opts, all_img_dbs=None):
|
if all_img_dbs is None:
|
all_img_dbs = ImageLmdbGroup(opts.conf_th, opts.max_bb, opts.min_bb,
|
opts.num_bb, opts.compressed_db)
|
dataloaders = {}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.