# variance_analysis / damans_code.py
# Uploaded by guanning using the upload-large-folder tool (commit 26f2cbf, verified).
import os
import json
import torch
import matplotlib.pyplot as plt
import torchvision.models as models
from torch.utils.data import DataLoader
from tqdm import tqdm
from verl.cifar10_experiments.sampling_based_rl_objective_experiments import HFImageNet, calculate_loss
from datasets import load_dataset
from torchvision import transforms
def gradient_snr(gradients: torch.Tensor, eps: float = 1e-8) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Compute the gradient SNR given a set of per-sample gradients.

    SNR is defined as ||mu||^2 / sum(var), where mu and var are the
    element-wise mean and (population) variance across the N samples.

    Args:
        gradients: (N, D) tensor of N gradient vectors, each of dimension D.
        eps: small constant added to the variance term for numerical stability.

    Returns:
        A tuple of three scalar tensors:
            snr:          ||mu||^2 / (sum of per-dimension variances + eps)
            mean_sq_norm: squared L2 norm of the mean gradient, ||mu||^2
            var_sum:      per-dimension variances summed over all D dimensions
    """
    # gradients: (N, D)
    mu = gradients.mean(dim=0)  # (D,)
    # unbiased=False -> population variance over the N gradient samples
    var = gradients.var(dim=0, unbiased=False)  # (D,)
    mean_sq_norm = mu.pow(2).sum()  # ||mu||^2
    var_sum = var.sum()  # total variance (trace of the diagonal covariance)
    snr = mean_sq_norm / (var_sum + eps)
    return snr, mean_sq_norm, var_sum
# Path to the trained ResNet-50 checkpoint whose gradients are analyzed below.
CHECKPOINT_PATH = "/data/imagenet256_checkpoints/cross_entropy_1024/checkpoint_latest_final.pth"
BATCH_SIZE = 1024  # number of images in the fixed evaluation batch
BATCH_CACHE_PATH = f"batch_{BATCH_SIZE}.pt"  # on-disk cache for the batch tensors
S = 64 # number of gradient samples used to estimate SNR
# Advantage estimators to sweep over (names consumed by calculate_loss).
ADVANTAGE_TYPES = ["grpo", "reinforce_with_baseline", "reinforce_with_p_normalization",]
# ADVANTAGE_TYPES = ["grpo"]
# ROLLOUT_COUNTS = [4, 8, 16, 32, 64]
# ROLLOUT_COUNTS = [64, 128, 256]
# ROLLOUT_COUNTS = [4, 16, 64, 256, 1024, 4096]
# Rollout counts swept: powers of 4 from 4 up to 2^20.
ROLLOUT_COUNTS = [2**i for i in range(2, 22, 2)]
print(ROLLOUT_COUNTS)
# ROLLOUT_COUNTS = [2**14, 2**16, 2**18, 2**20]
# ROLLOUT_COUNTS = [2**22, 2**24, 2**26]
SNR_STATS_PATH = "snr_stats_large.json"  # JSON output file for the SNR sweep
MINI_BATCH_SIZE = 512 # number of images per minibatch to avoid OOM
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load model
model = models.resnet50(num_classes=1000)  # ImageNet-1k classification head
# map_location="cpu" so the checkpoint loads regardless of the device it was saved on.
checkpoint = torch.load(CHECKPOINT_PATH, map_location="cpu")
model.load_state_dict(checkpoint["model_state"])
model = model.to(device)
# eval(): freeze batch-norm running stats / disable dropout so every gradient
# sample sees the same deterministic forward pass.
model.eval()
# Load one batch of BATCH_SIZE images from imagenet (cached on disk so
# repeated runs skip the dataset download / preprocessing).
if os.path.exists(BATCH_CACHE_PATH):
    print(f"Loading cached batch from {BATCH_CACHE_PATH}")
    batch = torch.load(BATCH_CACHE_PATH)
else:
    print("Cache not found, loading dataset...")
    # Standard ImageNet channel statistics for normalization.
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std),
    ])
    hf_ds = load_dataset("benjamin-paine/imagenet-1k-256x256")
    dataset = HFImageNet(split="validation", transform=test_transform, hf_ds=hf_ds)
    # shuffle=False so the cached batch is deterministic across runs.
    loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4, pin_memory=True)
    batch = next(iter(loader))
    torch.save(batch, BATCH_CACHE_PATH)
    print(f"Batch cached to {BATCH_CACHE_PATH}")
inputs, targets = batch
inputs, targets = inputs.to(device), targets.to(device)
# Sweep every (advantage estimator, rollout count) pair, drawing S independent
# full-batch gradient samples per configuration and recording the SNR.
snr_stats = {}
for advantage_type in ADVANTAGE_TYPES:
    snr_stats[advantage_type] = {}
    for N in ROLLOUT_COUNTS:
        print(f"\n[advantage={advantage_type}, N={N}] collecting {S} gradient samples...")
        try:
            all_grads = []
            # NOTE(review): assumes BATCH_SIZE is divisible by MINI_BATCH_SIZE,
            # otherwise the per-minibatch loss scaling below is slightly off.
            num_minibatches = BATCH_SIZE // MINI_BATCH_SIZE
            for s in tqdm(range(S)):
                # Fresh gradient sample each iteration; the stochasticity
                # presumably comes from rollout sampling inside calculate_loss.
                model.zero_grad()
                for mb_start in range(0, BATCH_SIZE, MINI_BATCH_SIZE):
                    mb_inputs = inputs[mb_start : mb_start + MINI_BATCH_SIZE]
                    mb_targets = targets[mb_start : mb_start + MINI_BATCH_SIZE]
                    logits = model(mb_inputs)
                    loss = calculate_loss(
                        logits=logits,
                        targets=mb_targets,
                        num_train_rollouts_per_example=N,
                        advantage_type=advantage_type,
                        max_k=None,
                    )
                    # scale so accumulated gradient == average over full batch
                    (loss / num_minibatches).backward()
                # Flatten the accumulated full-batch gradient into one vector.
                flat_grad = torch.cat([
                    p.grad.detach().view(-1)
                    for p in model.parameters()
                    if p.grad is not None
                ])
                all_grads.append(flat_grad)
            gradients = torch.stack(all_grads)  # (S, D)
            snr, mean_sq_norm, var_sum = gradient_snr(gradients)
            snr, mean_sq_norm, var_sum = snr.item(), mean_sq_norm.item(), var_sum.item()
            print(f" SNR = {snr:.6f}, mean = {mean_sq_norm:.6f}, var = {var_sum:.6f}")
            snr_stats[advantage_type][N] = {
                "snr": snr,
                "mean": mean_sq_norm,
                "var": var_sum,
            }
        except Exception as e:
            # Best-effort sweep: record the failure (e.g. CUDA OOM at large N)
            # as None and continue with the remaining configurations.
            print(f" ERROR: {e}")
            snr_stats[advantage_type][N] = None
# Persist all results once the sweep finishes.
with open(SNR_STATS_PATH, "w") as f:
    json.dump(snr_stats, f, indent=2)
print(f"\nSaved SNR stats to {SNR_STATS_PATH}")
# --- Plot ---
# Human-readable labels for the advantage-type keys stored in the JSON.
LABEL_MAP = {
    "reinforce_with_baseline": "RLOO",
    "grpo": "GRPO",
    "reinforce_with_p_normalization": "MaxRL",
}
with open(SNR_STATS_PATH) as f:
    plot_data = json.load(f)
fig, ax = plt.subplots(figsize=(8, 5))
for advantage_type, rollout_snrs in plot_data.items():
    # JSON keys are strings; convert back to int rollout counts for the x-axis.
    xs = [int(k) for k in rollout_snrs]
    # Failed configurations were stored as None; keep them as-is so
    # matplotlib renders gaps instead of raising.
    ys = [v["snr"] if isinstance(v, dict) else v for v in rollout_snrs.values()]
    label = LABEL_MAP.get(advantage_type, advantage_type)
    ax.plot(xs, ys, marker="o", label=label)
ax.set_xscale("log", base=2)  # rollout counts are powers of two
ax.set_xlabel("Rollouts (N)")
ax.set_ylabel("Gradient SNR")
ax.set_title(f"Gradient SNR vs Rollouts (S={S} gradient samples)")
ax.legend()
ax.grid(True, which="both", linestyle="--", alpha=0.5)
plt.tight_layout()
plt.savefig("snr.png", dpi=150)
print("Saved plot to snr.png")