                loss += config['w_sp_attn'] * sparsity_loss_attn + config['w_sp_mlp'] * sparsity_loss_mlp

            # mixed-precision path: backpropagate the scaled loss and step through the GradScaler
            optimizer.zero_grad()
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
        else:
            # full-precision path
            loss = model(image, question, answer, train=True, n=n, weights=weights)

            if search:
                # during the search phase, log the task and sparsity terms separately,
                # then add the weighted attention/MLP sparsity penalties to the loss
                sparsity_loss_attn, sparsity_loss_mlp = model.module.get_sparsity_loss()
                metric_logger.update(loss_ce=loss.item())
                metric_logger.update(loss_sp_attn=config['w_sp_attn'] * sparsity_loss_attn.item())
                metric_logger.update(loss_sp_mlp=config['w_sp_mlp'] * sparsity_loss_mlp.item())
                loss += config['w_sp_attn'] * sparsity_loss_attn + config['w_sp_mlp'] * sparsity_loss_mlp

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        step = epoch * len_data_loader + i
        if search and (step % 1000 == 0 or step == total_steps - 1):
            pi = config['p'] * ((1 - math.cos(math.pi * (step + 1) / total_steps)) / 2) ** (1 / 2)
            update_alpha_parameters(model, 12, config['p'], pi)
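            # The current compression target pi ramps from 0 to config['p'] along a
            # square-rooted half-cosine: at roughly total_steps/2 it is about
            # 0.707 * p, and it reaches p exactly at the final step. (The literal 12
            # passed to update_alpha_parameters presumably matches the number of
            # transformer layers; that is an assumption, not taken from this file.)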

        metric_logger.update(loss=loss.item())
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger.global_avg())
    return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
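

# The dict returned above maps each tracked meter to its epoch-level average
# formatted to three decimals, e.g. roughly {'loss': '3.142', 'lr': '0.000'},
# plus loss_ce / loss_sp_attn / loss_sp_mlp entries when running in search mode
# (the example values are illustrative, not taken from a real run).
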
@torch.no_grad()
def evaluation(model, data_loader, device, config):
    # test
    model.eval()

    metric_logger = utils.MetricLogger(delimiter=" ")
    header = 'Generate VQA test result:'
    print_freq = 50

    result = []

    if config['inference'] == 'rank':
        answer_list = data_loader.dataset.answer_list
        answer_candidates = model.tokenizer(answer_list, padding='longest', return_tensors='pt').to(device)
        answer_candidates.input_ids[:, 0] = model.tokenizer.bos_token_id
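        # In BLIP-style rank inference, the answer list is tokenized once and every
        # candidate is forced to begin with the BOS token; the model then scores the
        # candidates per question and returns the index of the best one, with k_test
        # presumably limiting full rescoring to the top-k candidates (an assumption
        # based on BLIP, not verified from this file).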
    for n, (image, question, question_id) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image = image.to(device, non_blocking=True)

        if config['inference'] == 'generate':
            answers = model(image, question, train=False, inference='generate')

            for answer, ques_id in zip(answers, question_id):
                ques_id = int(ques_id.item())
                result.append({"question_id": ques_id, "answer": answer})

        elif config['inference'] == 'rank':
            answer_ids = model(image, question, answer_candidates, train=False, inference='rank', k_test=config['k_test'])

            for ques_id, answer_id in zip(question_id, answer_ids):
                result.append({"question_id": int(ques_id.item()), "answer": answer_list[answer_id]})

    return result
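
# Each entry appended to `result` has the form {"question_id": <int>, "answer": <str>};
# the caller is expected to persist the list itself, e.g. with json.dump or a
# save_result-style helper if the repository provides one (hypothetical here).
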

def main(args, config, client):
    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    config['pretrained'] = args.pretrained
    config['w_sp_attn'] = args.w_sp_attn / args.world_size
    config['w_sp_mlp'] = args.w_sp_mlp / args.world_size
    config['max_epoch'] = args.epoch
    config['p'] = args.p
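
    # Note: the sparsity-loss weights above are divided by the number of processes,
    # presumably so the effective penalty stays comparable when gradients are
    # averaged across ranks in distributed training (an assumption, not documented
    # in this file).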
    if not args.evaluate:
        print('Target compression ratio: {}%'.format(config['p'] * 100))

    #### Dataset ####
    print("Creating vqa datasets")
    datasets = create_dataset('vqa', config, client)

    if args.distributed:
        num_tasks = utils.get_world_size()
        global_rank = utils.get_rank()
        samplers = create_sampler(datasets, [True, False], num_tasks, global_rank)
    else:
        samplers = [None, None]

    train_loader, test_loader = create_loader(datasets, samplers,
                                              batch_size=[config['batch_size_train'], config['batch_size_test']],
                                              num_workers=[4, 4], is_trains=[True, False],