import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.blip_vqa import blip_vqa
import utils
from utils import cosine_lr_schedule, print_params_and_flops
from data import create_dataset, create_sampler, create_loader
from data.vqa_dataset import vqa_collate_fn
from data.utils import save_result
import io
# from petrel_client.client import Client
import math
from torch.cuda.amp import autocast
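
# Search-phase helper: re-binarize the learnable pruning masks (alpha) from
# their accumulated gradients. Entries whose (standardized) gradient falls in
# the top weighted-pi fraction are shrunk toward zero; all others are reset
# to 1, so the masks converge progressively as pi is annealed toward p.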
def update_alpha_parameters(model, layers, p, pi, print_info=True):
    standardization = lambda x: (x - torch.mean(x)) / torch.std(x)

    # Collect alpha gradients from every attention module. The visual encoder
    # uses torch.cat because its alpha carries an extra leading dimension
    # (mirrored by the unsqueeze(0) in the update loop below).
    alpha_grad_attn = torch.stack([
        torch.cat([getattr(model.module.visual_encoder.blocks, str(i)).attn.alpha.grad for i in range(layers)]),
        torch.stack([getattr(model.module.text_encoder.encoder.layer, str(i)).attention.self.alpha.grad for i in range(layers)]),
        torch.stack([getattr(model.module.text_encoder.encoder.layer, str(i)).crossattention.self.alpha.grad for i in range(layers)]),
        torch.stack([getattr(model.module.text_decoder.bert.encoder.layer, str(i)).attention.self.alpha.grad for i in range(layers)]),
        torch.stack([getattr(model.module.text_decoder.bert.encoder.layer, str(i)).crossattention.self.alpha.grad for i in range(layers)]),
    ])
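
    # Collect alpha gradients from every MLP (intermediate) module.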
    alpha_grad_mlp = torch.stack([
        torch.stack([getattr(model.module.visual_encoder.blocks, str(i)).mlp.alpha.grad for i in range(layers)]),
        torch.stack([getattr(model.module.text_encoder.encoder.layer, str(i)).intermediate.alpha.grad for i in range(layers)]),
        torch.stack([getattr(model.module.text_decoder.bert.encoder.layer, str(i)).intermediate.alpha.grad for i in range(layers)]),
    ])
    alpha_grad_attn, alpha_grad_mlp = standardization(alpha_grad_attn), standardization(alpha_grad_mlp)
    alpha_grad = torch.cat([alpha_grad_attn.view(-1), alpha_grad_mlp.view(-1)])
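
    # Rank all gradients globally. Attention entries are weighted by 36 (their
    # parameter cost per masked unit) so that the threshold lands where the
    # weighted share of entries above it equals the current target ratio pi.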
    sorted_alpha_grad, indices = torch.sort(alpha_grad, descending=True)
    compression_weight = torch.ones_like(indices)
    compression_weight[indices < alpha_grad_attn.numel()] = 36  # 36 = 12 (number of heads) * 3 (weights of query, key, and value)
    threshold = sorted_alpha_grad[torch.argmin(torch.abs(torch.cumsum(compression_weight, 0) - torch.sum(compression_weight) * pi))]

    def update(module, grad):
        # Entries at or below the threshold (and at least the minimum entry)
        # keep alpha = 1; the rest are shrunk to 1 - pi/p, reaching 0 once
        # pi has been annealed all the way to p.
        mask = ((grad <= threshold) | (grad <= torch.min(grad)))
        module.data.copy_(mask + (~mask) * (1 - pi / p))
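
    # Write the new mask values back into every alpha parameter.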
    for i in range(layers):
        update(getattr(model.module.visual_encoder.blocks, str(i)).attn.alpha, alpha_grad_attn[0, i].unsqueeze(0))
        update(getattr(model.module.text_encoder.encoder.layer, str(i)).attention.self.alpha, alpha_grad_attn[1, i])
        update(getattr(model.module.text_encoder.encoder.layer, str(i)).crossattention.self.alpha, alpha_grad_attn[2, i])
        update(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).attention.self.alpha, alpha_grad_attn[3, i])
        update(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).crossattention.self.alpha, alpha_grad_attn[4, i])
        update(getattr(model.module.visual_encoder.blocks, str(i)).mlp.alpha, alpha_grad_mlp[0, i])
        update(getattr(model.module.text_encoder.encoder.layer, str(i)).intermediate.alpha, alpha_grad_mlp[1, i])
        update(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).intermediate.alpha, alpha_grad_mlp[2, i])
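
    # Optionally report the realized compression ratios (mean masked fraction).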
    if print_info:
        attn, mlp = [], []
        for i in range(layers):
            attn.append(getattr(model.module.visual_encoder.blocks, str(i)).attn.alpha.flatten())
            attn.append(getattr(model.module.text_encoder.encoder.layer, str(i)).attention.self.alpha.flatten())
            attn.append(getattr(model.module.text_encoder.encoder.layer, str(i)).crossattention.self.alpha.flatten())
            attn.append(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).attention.self.alpha.flatten())
            attn.append(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).crossattention.self.alpha.flatten())
            mlp.append(getattr(model.module.visual_encoder.blocks, str(i)).mlp.alpha.flatten())
            mlp.append(getattr(model.module.text_encoder.encoder.layer, str(i)).intermediate.alpha.flatten())
            mlp.append(getattr(model.module.text_decoder.bert.encoder.layer, str(i)).intermediate.alpha.flatten())
        print('Current compression ratio of attn: ', 1 - torch.mean(torch.cat(attn)))
        print('Current compression ratio of mlp: ', 1 - torch.mean(torch.cat(mlp)))
        print('Current compression ratio: ', pi)
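

# Hypothetical call site (not shown in this file): after each search-phase
# backward pass one would invoke, e.g. for a 12-layer model,
#   update_alpha_parameters(model, 12, p, pi)
# with pi annealed from 0 toward the final target ratio p.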
def train(model, data_loader, optimizer, epoch, device, config, search=False, scaler=None):
    # Train (or, when search=True, run the pruning-mask search) for one epoch.
    model.train()

    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
    if search:
        metric_logger.add_meter('loss_ce', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
        metric_logger.add_meter('loss_sp_attn', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
        metric_logger.add_meter('loss_sp_mlp', utils.SmoothedValue(window_size=50, fmt='{value:.4f}'))
    header = 'Search Epoch: [{}]'.format(epoch) if search else 'Train Epoch: [{}]'.format(epoch)
    print_freq = 50

    len_data_loader = len(data_loader)
    total_steps = len_data_loader * config['max_epoch']
    for i, (image, question, answer, weights, n) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image, weights = image.to(device, non_blocking=True), weights.to(device, non_blocking=True)

        if scaler is not None:
            # Mixed-precision path (torch.cuda.amp).
            with autocast():
                loss = model(image, question, answer, train=True, n=n, weights=weights)
                if search:
                    sparsity_loss_attn, sparsity_loss_mlp = model.module.get_sparsity_loss()
                    metric_logger.update(loss_ce=loss.item())
                    metric_logger.update(loss_sp_attn=config['w_sp_attn'] * sparsity_loss_attn.item())
                    metric_logger.update(loss_sp_mlp=config['w_sp_mlp'] * sparsity_loss_mlp.item())