        args.dist_eval = False
        dataset_val = None
    else:
        dataset_val, _ = build_dataset(is_train=False, args=args)

    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()

    sampler_train = torch.utils.data.DistributedSampler(
        dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True, seed=args.seed,
    )
    print("Sampler_train = %s" % str(sampler_train))
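    # Validation sampler: shard the eval set across ranks when --dist_eval is set,
    # otherwise fall back to a simple sequential sampler.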
    if args.dist_eval:
        if len(dataset_val) % num_tasks != 0:
            print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                  'This will slightly alter validation results as extra duplicate entries are added to achieve '
                  'equal num of samples per-process.')
        sampler_val = torch.utils.data.DistributedSampler(
            dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
    else:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
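    # Create loggers on the main process only (rank 0) so multiple ranks do not write duplicate logs.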
    if global_rank == 0 and args.log_dir is not None:
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
    else:
        log_writer = None

    if global_rank == 0 and args.enable_wandb:
        wandb_logger = utils.WandbLogger(args)
    else:
        wandb_logger = None
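    # Training loader drops the last incomplete batch (drop_last=True) so every step sees a full batch;
    # the validation loader keeps it so that every sample is evaluated.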
    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
    )

    if dataset_val is not None:
        data_loader_val = torch.utils.data.DataLoader(
            dataset_val, sampler=sampler_val,
            # batch_size=int(1.5 * args.batch_size),
            batch_size=args.batch_size,
            num_workers=args.num_workers,
            pin_memory=args.pin_mem,
            drop_last=False
        )
    else:
        data_loader_val = None
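    # Mixup/CutMix label-mixing augmentation is enabled when either alpha is non-zero
    # or explicit CutMix min/max ratios are given.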
    mixup_fn = None
    mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
    if mixup_active:
        print("Mixup is activated!")
        mixup_fn = Mixup(
            mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
            prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
            label_smoothing=args.smoothing, num_classes=args.nb_classes)
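    # Build the model; the classification head is sized to args.nb_classes.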
    model = create_model(
        args.model,
        pretrained=False,
        num_classes=args.nb_classes,
        drop_path_rate=args.drop_path,
        layer_scale_init_value=args.layer_scale_init_value,
        head_init_scale=args.head_init_scale,
        variant=args.variant,
    )
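    # Optionally warm-start from a pre-trained checkpoint (URL or local path) for fine-tuning;
    # classifier head weights with mismatched shapes are dropped so the new head trains from scratch.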
    if args.finetune:
        if args.finetune.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(
                args.finetune, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(args.finetune, map_location='cpu')

        print("Load ckpt from %s" % args.finetune)
        checkpoint_model = None
        for model_key in args.model_key.split('|'):
            if model_key in checkpoint:
                checkpoint_model = checkpoint[model_key]
                print("Load state_dict by model_key = %s" % model_key)
                break
        if checkpoint_model is None:
            checkpoint_model = checkpoint
        state_dict = model.state_dict()
        for k in ['head.weight', 'head.bias']:
            if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
                print(f"Removing key {k} from pretrained checkpoint")
                del checkpoint_model[k]
        utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
    model.to(device)

    model_ema = None
    if args.model_ema:
        # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
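        # (assumed continuation, following the usual timm ModelEma pattern; not part of this excerpt)
        # model_ema = ModelEma(model, decay=args.model_ema_decay,
        #                      device='cpu' if args.model_ema_force_cpu else '', resume='')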