import os
import random
import warnings

import numpy as np
import torch
import wandb

def set_seed(seed=123):
    """Seed all relevant RNGs so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # When running on the CuDNN backend, two further options must be set
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Set a fixed value for the hash seed
    os.environ['PYTHONHASHSEED'] = str(seed)
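
# Example usage (a minimal sketch; the seed value below is arbitrary):
#   set_seed(42)
#   print(torch.rand(3))  # identical output on every run that uses the same seed
# Note: full determinism on GPU may also require torch.use_deterministic_algorithms(True),
# typically at some cost in speed.
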
def calc_grad_norm(model):
    """Return the global L2 norm of all parameter gradients in the model."""
    grad_sum_sqrd = 0.
    for param in model.parameters():
        if param.grad is not None:
            grad_sum_sqrd += torch.sum(param.grad.detach() ** 2)
    norm = torch.sqrt(grad_sum_sqrd)
    return norm
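
# Example usage (sketch; the tiny linear model below is illustrative only):
#   model = torch.nn.Linear(4, 2)
#   loss = model(torch.randn(8, 4)).sum()
#   loss.backward()
#   print(calc_grad_norm(model))  # scalar tensor: global L2 norm over all gradients
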
def calc_mean_entropy(predicted_logits):
    """For each prediction, calculate the entropy (in bits) of the predicted
    distribution over all tokens, then return the mean over all predictions
    in the batch."""
    vocab_size = predicted_logits.shape[2]
    probabilities = torch.softmax(predicted_logits.reshape(-1, vocab_size), dim=1)
    prob_zeros_mask = probabilities == 0.
    # When a probability equals 0, this gives 0 * -inf and torch returns nan.
    # By the entropy definition it should equal 0, so we zero those entries out.
    tmp = probabilities * torch.log2(probabilities)
    tmp[prob_zeros_mask] = 0.
    if torch.any(torch.isnan(tmp)):
        warnings.warn("Warning: entropy calculation (metric) has nans in it")
    entropy = -torch.sum(tmp, dim=1)
    return torch.mean(entropy)
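
# Example usage (sketch; assumes logits of shape [batch, seq_len, vocab_size]):
#   logits = torch.randn(2, 5, 100)          # stand-in for a model's output
#   mean_ent = calc_mean_entropy(logits)     # mean of H(p) = -sum_i p_i * log2(p_i)
#   # The upper bound is log2(vocab_size) bits, reached by a uniform distribution.
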
def init_grad_flow_data(model):
    """Create an empty dict with a 'steps' list plus one list per (module, layer)
    pair, used to accumulate gradient-flow statistics over training."""
    grad_flow_data = {}
    grad_flow_data['steps'] = []
    for module_name, module in model.named_children():
        layers = []
        for n, p in module.named_parameters():
            if p.requires_grad and ("bias" not in n):
                if n.startswith('layers.'):
                    # Aggregate all parameters of block x (named 'layers.x.*') under one key,
                    # zero-padding the block index so keys sort correctly.
                    layer_num = n.split('.')[1]
                    layer_num = layer_num if len(layer_num) == 2 else f'0{layer_num}'
                    layer_name = f'layer_{layer_num}'
                    if layer_name not in layers:
                        grad_flow_data[f'{module_name}/{layer_name}'] = []
                        layers.append(layer_name)
                else:
                    grad_flow_data[f'{module_name}/{n}'] = []
                    layers.append(n)
    return grad_flow_data
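
# Shape of the resulting dict (sketch; module/layer names depend on the model):
#   {'steps': [],
#    'encoder/layer_00': [], 'encoder/layer_01': [],
#    'encoder/embedding.weight': []}
# Each list later accumulates one averaged-gradient value per logged step.
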
def get_grad_flow_log_format(model, step, grad_flow_data):
    """Append the current step's per-layer gradient statistics to grad_flow_data and
    return a wandb-ready log dict of line-series plots (one per top-level module)."""
    log_dict = {}
    grad_flow_data['steps'].append(step)
    for module_name, module in model.named_children():
        cur_layers, cur_avg_grads, cur_max_grads = _calc_grad_flow(module.named_parameters(), module_name)
        if len(cur_layers) == 0:
            continue
        keys = []
        y_vals = []
        for i in range(len(cur_layers)):
            layer_name = cur_layers[i]
            avg_grads = cur_avg_grads[i]
            max_grads = cur_max_grads[i]
            # update the accumulated data
            grad_flow_data[f'{module_name}/{layer_name}'].append(avg_grads)
            # grad_flow_data[f'max_grad/{module_name}/{layer_name}'].append(max_grads)
            # collect series for wandb
            keys.append(layer_name)
            y_vals.append(grad_flow_data[f'{module_name}/{layer_name}'])
        # save in wandb line-series structure
        log_dict[module_name] = wandb.plot.line_series(
            xs=grad_flow_data['steps'],
            ys=y_vals,
            keys=keys,
            title=f'{module_name} grad flow (normalized by weight values)',
            xname="steps")
    return log_dict, grad_flow_data
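
# Example usage inside a training loop (sketch; `model`, `loss`, and `step` are
# assumed to come from the surrounding training code):
#   grad_flow_data = init_grad_flow_data(model)
#   ...
#   loss.backward()
#   log_dict, grad_flow_data = get_grad_flow_log_format(model, step, grad_flow_data)
#   wandb.log(log_dict, step=step)
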
def _calc_grad_flow(named_parameters, module_name, epsilon=1e-13):
    """Compute per-layer average and max gradient magnitudes, normalized by the
    corresponding weight magnitudes (epsilon guards against division by zero)."""
    avg_grads = []
    avg_weights = []
    max_grads = []
    layers = []
    norm = 'l2'  # 'l1' / 'l2'
    # num_elements_in_layer = []
    for n, p in named_parameters:
        if p.requires_grad and ("bias" not in n):
            p_grad = p.grad.cpu()
            p_weight = p.detach().clone().cpu()
            if n.startswith('layers.'):  # block x ('layers.x.*') has a few components, we aggregate them
                layer_num = n.split('.')[1]
                layer_num = layer_num if len(layer_num) == 2 else f'0{layer_num}'
                if f'layer_{layer_num}' not in layers: