text stringlengths 1 93.6k |
|---|
layers.append(f'layer_{layer_num}')
|
avg_grads.append(0.)
|
avg_weights.append(0.)
|
max_grads.append(0.)
|
# num_elements_in_layer.append(0)
|
# num_elements_in_layer[-1]+=len(p_grad.flatten())
|
if norm == 'l2':
|
avg_grads[-1]+=p_grad.square().sum()
|
avg_weights[-1]+=p_weight.square().sum()
|
else:
|
avg_grads[-1]+=p_grad.abs().sum()
|
avg_weights[-1]+=p_weight.abs().sum()
|
max_grads[-1] = torch.max(torch.Tensor([max_grads[-1], p_grad.abs().max()]))
|
else:
|
layers.append(n)
|
if norm == 'l2':
|
avg_grads.append(p_grad.square().sum())
|
avg_weights.append(p_weight.square().sum())
|
else:
|
avg_grads.append(p_grad.abs().sum())
|
avg_weights.append(p_weight.abs().sum())
|
max_grads.append(p_grad.abs().max())
|
# num_elements_in_layer.append(len(p_grad.flatten()))
|
# avg_grads = [avg_grads[i]/num_elements_in_layer[i] for i in range(len(avg_grads))]
|
if norm == 'l2':
|
avg_grads = [torch.sqrt(avg_grads[i]/(avg_weights[i]+epsilon)) for i in range(len(avg_grads))] # no need to divide by num_elements_in_layer, it cancels out in avg_grad/avg_weight
|
else:
|
avg_grads = [avg_grads[i]/(avg_weights[i]+epsilon) for i in range(len(avg_grads))] # no need to divide by num_elements_in_layer, it cancels out in avg_grad/avg_weight
|
return layers, avg_grads, max_grads
|
def init_entropy_per_layer_data(num_layers=24):
    """Create an empty per-layer entropy history for logging.

    Parameters
    ----------
    num_layers : int, optional
        Number of layers to track. Defaults to 24, the value that was
        previously hard-coded; passing a different depth generalizes the
        helper to other models without breaking existing callers.

    Returns
    -------
    dict
        ``{'steps': [], 'layer_00': [], ..., 'layer_NN': []}`` — layer keys
        are zero-padded to two digits so they sort lexicographically.
    """
    entropy_data = {'steps': []}
    for layer_idx in range(num_layers):
        # f'{layer_idx:02d}' replaces the manual len(str(...)) == 2 padding.
        entropy_data[f'layer_{layer_idx:02d}'] = []
    return entropy_data
|
def get_log_format_for_per_layer_entropy(step, mean_entropy_per_layer, entropy_data):
    """Record one step of per-layer mean entropy and build a wandb log dict.

    Appends ``step`` and each layer's entropy value to ``entropy_data`` in
    place, then packages the accumulated history as a wandb line-series plot
    (one line per layer, x-axis = steps).

    Returns ``(log_dict, entropy_data)``; ``entropy_data`` is the same
    (mutated) dict that was passed in.
    """
    entropy_data['steps'].append(step)

    series_keys = []
    series_vals = []
    for idx, layer_entropy in enumerate(mean_entropy_per_layer):
        padded = idx if len(str(idx)) == 2 else f'0{idx}'
        name = f'layer_{padded}'
        # update db
        entropy_data[name].append(layer_entropy)
        # for wandb
        series_keys.append(name)
        series_vals.append(entropy_data[name])

    # save in wandb structure
    log_dict = {
        'mean_entropy_per_layer': wandb.plot.line_series(
            xs=entropy_data['steps'],
            ys=series_vals,
            keys=series_keys,
            title=f'mean entropy mem activity per layer',
            xname="steps",
        )
    }
    return log_dict, entropy_data
|
def convert_niah_array_to_img(niah_array, config):
    """Render a needle-in-a-haystack (NIAH) score map as a PIL image.

    Parameters
    ----------
    niah_array : 2D array of scores; columns correspond to
        ``config['niah_context_lens_eval']`` and rows to
        ``config['niah_needle_depths_eval']``.
    config : dict
        Must contain 'niah_context_lens_eval', 'niah_needle_depths_eval' and
        'niah_context_len_train'; the train length must appear in the eval
        context-length list (``.index`` raises ValueError otherwise).

    Returns
    -------
    PIL.Image.Image
        Two-color map (tomato / lightgreen) with a black vertical line
        marking the training context length.
    """
    fig = plt.figure()
    plt.xticks(list(range(len(config['niah_context_lens_eval']))), config['niah_context_lens_eval'])
    plt.yticks(list(range(len(config['niah_needle_depths_eval']))), config['niah_needle_depths_eval'])
    plt.xlabel('context length [toks]')
    plt.ylabel('needle depth w.r.t context length')
    plt.title('niah map')
    # Two-entry colormap: low values -> 'tomato', high values -> 'lightgreen'.
    cmap = matplotlib.colors.ListedColormap(['tomato', 'lightgreen'])
    context_len_train = config['niah_context_len_train']
    plt.imshow(niah_array, interpolation='none', cmap=cmap)
    # Mark the column that matches the training context length.
    index_train_context_len = config['niah_context_lens_eval'].index(context_len_train)
    plt.axvline(x=index_train_context_len, color='black', linewidth=3)
    plt.annotate(f'train context len = {context_len_train//1000}k',
                 xy=(index_train_context_len, 0.8), xycoords='data',
                 horizontalalignment='right', verticalalignment='top', rotation=90, fontsize=12)
    try:
        fig.canvas.draw()
        # NOTE(review): FigureCanvas.tostring_rgb() is deprecated (removed in
        # matplotlib >= 3.10); buffer_rgba() is the replacement — confirm the
        # pinned matplotlib version before migrating.
        niah_img = PIL.Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())
    finally:
        # Bug fix: the figure was never closed, so repeated evaluation calls
        # accumulated open figures and leaked memory.
        plt.close(fig)
    return niah_img
|
# <FILESEP>
|
import cv2
|
import pyaudio
|
import wave
|
import threading
|
import numpy as np
|
import time
|
from queue import Queue
|
import webrtcvad
|
import os
|
import threading
|
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.