collate_fns=[vqa_collate_fn, None])
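
#### Search ####
# Two-phase pipeline: a searchable copy of the model first learns pruning
# decisions (the 'alpha' parameters), then a fresh model is compressed with
# the searched structure and fine-tuned. With --evaluate, a compressed
# checkpoint is loaded and pruned directly instead.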
if not args.evaluate:
    print("Creating model for searching")
    search_model = blip_vqa(client=client, pretrained=config['pretrained'], image_size=config['image_size'],
                            vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'],
                            search=True)
    search_model = search_model.to(device)
    print_params_and_flops('vqa', search_model, device)

    search_model_without_ddp = search_model
    if args.distributed:
        search_model = torch.nn.parallel.DistributedDataParallel(search_model, device_ids=[args.gpu])
        search_model_without_ddp = search_model.module
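
    # The learnable 'alpha' pruning parameters are not meant to be updated by
    # AdamW (they are presumably updated manually inside train()). Without AMP
    # they are simply left out of the optimizer; with AMP they are added as a
    # second group with lr=0 and no weight decay, so that GradScaler still
    # unscales their gradients before the manual update.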
    if not args.amp:
        optimizer = torch.optim.AdamW(
            params=[{'params': [param for name, param in search_model.named_parameters() if 'alpha' not in name]}],
            lr=config['init_lr'],
            weight_decay=config['weight_decay']
        )
    else:
        optimizer = torch.optim.AdamW(
            [{'params': [param for name, param in search_model.named_parameters() if 'alpha' not in name],
              'lr': config['init_lr'], 'weight_decay': config['weight_decay']},
             {'params': [param for name, param in search_model.named_parameters() if 'alpha' in name],
              'lr': 0, 'weight_decay': 0}]
        )
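
    # GradScaler performs dynamic loss scaling when mixed precision is enabled.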
print("Start searching")
scaler = torch.cuda.amp.GradScaler() if args.amp else None
    for epoch in range(0, config['max_epoch']):
        if args.evaluate:
            break
        if args.distributed:
            train_loader.sampler.set_epoch(epoch)
        train(search_model, train_loader, optimizer, epoch, device, config, search=True, scaler=scaler)
        if args.distributed:
            dist.barrier()
        # Use the unwrapped model so this also works without DistributedDataParallel.
        search_model_without_ddp.print_compression_statistics()
    #### Model ####
    print("Creating model for training")
    model = blip_vqa(client=client, pretrained=config['pretrained'], image_size=config['image_size'],
                     vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'])
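    # Warm-start the compact model from the searched weights (strict=False,
    # since the searchable copy carries extra 'alpha' parameters), then prune
    # it to the searched structure.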
    msg = model.load_state_dict(search_model_without_ddp.state_dict(), strict=False)
    model.compress(search_model_without_ddp)
else:
    print("Creating model for evaluation")
    model = blip_vqa(client=client, pretrained='', image_size=config['image_size'],
                     vit=config['vit'], vit_grad_ckpt=config['vit_grad_ckpt'], vit_ckpt_layer=config['vit_ckpt_layer'],
                     evaluate=True)
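    # prune_if_compressed presumably shrinks the architecture to match the
    # compressed checkpoint at config['pretrained'] before loading its weights.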
    model.prune_if_compressed(client, config['pretrained'])

model = model.to(device)
print_params_and_flops('vqa', model, device)

model_without_ddp = model
if args.distributed:
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
    model_without_ddp = model.module
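
# Unlike the search phase, fine-tuning updates all parameters of the
# compressed model.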
optimizer = torch.optim.AdamW(params=model.parameters(), lr=config['init_lr'], weight_decay=config['weight_decay'])
best = 0
best_epoch = 0
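
#### Training ####
# Fine-tune the compressed model, annealing the learning rate with a cosine
# schedule each epoch.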
print("Start training")
scaler = torch.cuda.amp.GradScaler() if (not args.evaluate and args.amp) else None
for epoch in range(0, config['max_epoch']):
    if not args.evaluate:
        if args.distributed:
            train_loader.sampler.set_epoch(epoch)
        cosine_lr_schedule(optimizer, epoch, config['max_epoch'], config['init_lr'], config['min_lr'])
        train_stats = train(model, train_loader, optimizer, epoch, device, config, scaler=scaler)
    else:
        break
    if utils.is_main_process():
        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     'epoch': epoch,
                     }
        # with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
        #     f.write(json.dumps(log_stats) + "\n")
        print("LOG: ", log_stats)
        save_obj = {
            'model': model_without_ddp.state_dict(),
            # 'optimizer': optimizer.state_dict(),
            # 'config': config,
            # 'epoch': epoch,
        }
        if client is not None:
            with io.BytesIO() as f:
                torch.save(save_obj, f)
                # Upload while the buffer is still open; getvalue() fails on a closed BytesIO.
                client.put(os.path.join('s3://BucketName/ProjectName', args.output_dir, 'checkpoint_%02d.pth' % epoch), f.getvalue())
        else:
            torch.save(save_obj, os.path.join(args.output_dir, 'checkpoint_%02d.pth' % epoch))
    if args.distributed:
        dist.barrier()