text stringlengths 1 93.6k |
|---|
def setup(args):
    """Create the experiment config and pick a fresh output directory.

    Merges the YAML config file and CLI overrides into a base config,
    then claims the first unused ``run{i}`` directory (i = 1..RUN_N_TIMES)
    under ``OUTPUT_DIR / data_name / feature_name / lr_wd / time``.
    Raises ValueError when all RUN_N_TIMES slots already exist.
    Returns the frozen config.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    # setup dist # comment this out
    # cfg.DIST_INIT_PATH = "tcp://{}:12399".format(os.environ["SLURMD_NODENAME"])

    # Output layout: output_dir / data_name / feature_name / lr_wd / run1
    base_dir = cfg.OUTPUT_DIR
    lr = cfg.SOLVER.BASE_LR
    wd = cfg.SOLVER.WEIGHT_DECAY

    # turn these two lines to True to enable final model saving.
    # cfg.MODEL.SAVE_CKPT_FINALRUNS = True
    # cfg.MODEL.SAVE_CKPT = True

    current_time = time.strftime("%H:%M:%S", time.localtime())
    print("Self-added Current time (Applied for debugging), In train.py")

    output_folder = os.path.join(
        cfg.DATA.NAME, cfg.DATA.FEATURE, f"lr{lr}_wd{wd}", current_time)

    # Claim the first free run slot, trying at most cfg.RUN_N_TIMES times.
    for run_idx in range(1, cfg.RUN_N_TIMES + 1):
        candidate = os.path.join(base_dir, output_folder, f"run{run_idx}")
        # pause for a random time, so concurrent process with same setting
        # won't interfere with each other. # noqa
        sleep(randint(3, 30))
        if not PathManager.exists(candidate):
            PathManager.mkdirs(candidate)
            cfg.OUTPUT_DIR = candidate
            break
    else:
        # Every slot was taken — this configuration has already been run.
        raise ValueError(
            f"Already run {cfg.RUN_N_TIMES} times for {output_folder}, no need to run more")

    cfg.freeze()
    return cfg
|
def get_loaders(cfg, logger):
    """Construct the train, validation, and test dataloaders.

    For ``vtab-*`` datasets the final training loader is built from the
    combined train+val split; otherwise the plain train split is used.
    The test loader is ``None`` when ``cfg.DATA.NO_TEST`` is set.
    Returns ``(train_loader, val_loader, test_loader)``.
    """
    logger.info("Loading training data (final training data for vtab)...")
    # vtab benchmarks train on train+val for the final run.
    build_train = (
        data_loader.construct_trainval_loader
        if cfg.DATA.NAME.startswith("vtab-")
        else data_loader.construct_train_loader
    )
    train_loader = build_train(cfg)

    logger.info("Loading validation data...")
    # not really needed for vtab
    val_loader = data_loader.construct_val_loader(cfg)

    logger.info("Loading test data...")
    test_loader = None
    if not cfg.DATA.NO_TEST:
        logger.info("test data is constructed")
        test_loader = data_loader.construct_test_loader(cfg)
    else:
        logger.info("...no test data is constructed")

    return train_loader, val_loader, test_loader
|
def train(cfg, args):
|
# clear up residual cache from previous runs
|
if torch.cuda.is_available():
|
torch.cuda.empty_cache()
|
# main training / eval actions here
|
# fix the seed for reproducibility
|
if cfg.SEED is not None:
|
torch.manual_seed(cfg.SEED)
|
np.random.seed(cfg.SEED)
|
random.seed(0)
|
# setup training env including loggers
|
logging_train_setup(args, cfg)
|
logger = logging.get_logger("visual_prompt")
|
train_loader, val_loader, test_loader = get_loaders(cfg, logger)
|
logger.info("Constructing models...")
|
model, cur_device = build_model(cfg)
|
logger.info("Setting up Evalutator...")
|
evaluator = Evaluator()
|
logger.info("Setting up Trainer...")
|
trainer = Trainer(cfg, model, evaluator, cur_device)
|
if train_loader:
|
trainer.train_classifier(train_loader, val_loader, test_loader)
|
else:
|
print("No train loader presented. Exit")
|
if cfg.SOLVER.TOTAL_EPOCH == 0:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.