| import os |
| import json |
| import torch |
| import matplotlib.pyplot as plt |
| import torchvision.models as models |
| from torch.utils.data import DataLoader |
| from tqdm import tqdm |
|
|
| from verl.cifar10_experiments.sampling_based_rl_objective_experiments import HFImageNet, calculate_loss |
|
|
| from datasets import load_dataset |
| from torchvision import transforms |
|
|
def gradient_snr(
    gradients: torch.Tensor, eps: float = 1e-8
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Compute the gradient signal-to-noise ratio for a set of per-sample gradients.

    The SNR is the squared L2 norm of the mean gradient divided by the total
    (summed per-coordinate) variance: ||E[g]||^2 / tr(Cov[g]).

    Args:
        gradients: (N, D) tensor of N gradient vectors, each of dimension D.
        eps: small constant added to the variance sum for numerical stability
            (also keeps the ratio finite when all gradients are identical).

    Returns:
        A tuple ``(snr, mean_sq_norm, var_sum)`` of scalar tensors:
        - snr: ``mean_sq_norm / (var_sum + eps)``
        - mean_sq_norm: squared L2 norm of the mean gradient
        - var_sum: sum of the per-coordinate (biased/population) variances

    Raises:
        ValueError: if ``gradients`` is not a 2-D tensor.
    """
    if gradients.dim() != 2:
        raise ValueError(
            f"expected a 2-D (N, D) tensor, got shape {tuple(gradients.shape)}"
        )
    mu = gradients.mean(dim=0)
    # unbiased=False: population variance over the N sampled gradients.
    var = gradients.var(dim=0, unbiased=False)
    mean_sq_norm = mu.pow(2).sum()
    var_sum = var.sum()
    snr = mean_sq_norm / (var_sum + eps)
    return snr, mean_sq_norm, var_sum
|
|
|
|
# ---------------------------------------------------------------------------
# Experiment configuration
# ---------------------------------------------------------------------------

# Trained ResNet-50 checkpoint evaluated for gradient-SNR measurements.
CHECKPOINT_PATH = "/data/imagenet256_checkpoints/cross_entropy_1024/checkpoint_latest_final.pth"

# One fixed batch of validation images, cached to disk so every run measures
# gradients on exactly the same data.
BATCH_SIZE = 1024
BATCH_CACHE_PATH = f"batch_{BATCH_SIZE}.pt"

# Number of independent gradient samples drawn per (advantage, N) setting.
S = 64

# Advantage estimators under comparison.
ADVANTAGE_TYPES = [
    "grpo",
    "reinforce_with_baseline",
    "reinforce_with_p_normalization",
]

# Rollout counts swept on a log grid: 4, 16, 64, ..., 2**20.
ROLLOUT_COUNTS = [4 ** i for i in range(1, 11)]
print(ROLLOUT_COUNTS)

# Output path for the collected SNR statistics.
SNR_STATS_PATH = "snr_stats_large.json"

# The full batch is processed in minibatches of this size.
MINI_BATCH_SIZE = 512

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
|
| |
# Rebuild the ResNet-50 architecture, restore the trained weights from the
# checkpoint, and switch to inference mode on the target device.
model = models.resnet50(num_classes=1000)
model.load_state_dict(torch.load(CHECKPOINT_PATH, map_location="cpu")["model_state"])
model = model.to(device)
model.eval()
|
|
| |
# Load (or build and cache) one fixed validation batch so every experiment
# run sees identical data.
if os.path.exists(BATCH_CACHE_PATH):
    print(f"Loading cached batch from {BATCH_CACHE_PATH}")
    batch = torch.load(BATCH_CACHE_PATH)
else:
    print("Cache not found, loading dataset...")
    # Standard ImageNet channel statistics for input normalization.
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)

    # Deterministic eval-time preprocessing: resize, center-crop, normalize.
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std),
    ])

    # NOTE(review): HFImageNet presumably wraps the HF dataset split as a
    # torch Dataset yielding (image, label) pairs — confirm against
    # sampling_based_rl_objective_experiments.
    hf_ds = load_dataset("benjamin-paine/imagenet-1k-256x256")
    dataset = HFImageNet(split="validation", transform=test_transform, hf_ds=hf_ds)
    # shuffle=False keeps the cached batch reproducible across runs.
    loader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4, pin_memory=True)

    # Only the first batch is used; cache it for subsequent runs.
    batch = next(iter(loader))
    torch.save(batch, BATCH_CACHE_PATH)
    print(f"Batch cached to {BATCH_CACHE_PATH}")

inputs, targets = batch
inputs, targets = inputs.to(device), targets.to(device)
|
|
# Collected statistics, keyed as snr_stats[advantage_type][N];
# a value of None marks a failed (e.g. OOM) run.
snr_stats = {}

for advantage_type in ADVANTAGE_TYPES:
    snr_stats[advantage_type] = {}
    for N in ROLLOUT_COUNTS:
        print(f"\n[advantage={advantage_type}, N={N}] collecting {S} gradient samples...")
        try:
            all_grads = []

            # The fixed batch is processed in minibatches; each minibatch's
            # scaled loss is accumulated into .grad so the summed gradient
            # equals the mean-over-minibatches loss gradient of the full batch.
            num_minibatches = BATCH_SIZE // MINI_BATCH_SIZE

            for s in tqdm(range(S)):
                # Fresh gradient sample: clear previously accumulated grads.
                model.zero_grad()

                for mb_start in range(0, BATCH_SIZE, MINI_BATCH_SIZE):
                    mb_inputs = inputs [mb_start : mb_start + MINI_BATCH_SIZE]
                    mb_targets = targets[mb_start : mb_start + MINI_BATCH_SIZE]

                    logits = model(mb_inputs)

                    # NOTE(review): calculate_loss presumably draws N random
                    # rollouts per example, which is what makes each of the S
                    # gradient samples stochastic — confirm in
                    # sampling_based_rl_objective_experiments.
                    loss = calculate_loss(
                        logits=logits,
                        targets=mb_targets,
                        num_train_rollouts_per_example=N,
                        advantage_type=advantage_type,
                        max_k=None,
                    )

                    # Scale so accumulated grads average over minibatches.
                    (loss / num_minibatches).backward()

                # Flatten all parameter grads into one vector for this sample.
                # NOTE(review): kept on `device`; S stacked ResNet-50 gradient
                # vectors can occupy several GB on GPU — consider moving to
                # CPU here if memory is tight.
                flat_grad = torch.cat([
                    p.grad.detach().view(-1)
                    for p in model.parameters()
                    if p.grad is not None
                ])
                all_grads.append(flat_grad)

            # (S, D) matrix of gradient samples -> SNR statistics.
            gradients = torch.stack(all_grads)
            snr, mean_sq_norm, var_sum = gradient_snr(gradients)
            snr, mean_sq_norm, var_sum = snr.item(), mean_sq_norm.item(), var_sum.item()
            print(f" SNR = {snr:.6f}, mean = {mean_sq_norm:.6f}, var = {var_sum:.6f}")
            snr_stats[advantage_type][N] = {
                "snr": snr,
                "mean": mean_sq_norm,
                "var": var_sum,
            }

        # Record failures (e.g. CUDA OOM at large N) and continue the sweep.
        except Exception as e:
            print(f" ERROR: {e}")
            snr_stats[advantage_type][N] = None
|
|
# Persist the collected statistics so plotting can be rerun without
# repeating the (expensive) gradient sweep.
with open(SNR_STATS_PATH, "w") as stats_file:
    stats_file.write(json.dumps(snr_stats, indent=2))

print(f"\nSaved SNR stats to {SNR_STATS_PATH}")
|
|
| |
# Human-readable legend labels for each advantage estimator.
LABEL_MAP = dict(
    reinforce_with_baseline="RLOO",
    grpo="GRPO",
    reinforce_with_p_normalization="MaxRL",
)
|
|
# ---------------------------------------------------------------------------
# Plot gradient SNR vs. number of rollouts for every advantage estimator.
# ---------------------------------------------------------------------------
with open(SNR_STATS_PATH) as f:
    plot_data = json.load(f)

fig, ax = plt.subplots(figsize=(8, 5))

for advantage_type, rollout_snrs in plot_data.items():
    # JSON keys are strings; failed runs were recorded as null and must be
    # dropped, otherwise None values end up in the plotted series.
    points = [
        (int(k), v["snr"] if isinstance(v, dict) else v)
        for k, v in rollout_snrs.items()
        if v is not None
    ]
    if not points:
        continue  # every run for this estimator failed; nothing to plot
    xs, ys = zip(*points)
    label = LABEL_MAP.get(advantage_type, advantage_type)
    ax.plot(xs, ys, marker="o", label=label)

ax.set_xscale("log", base=2)
ax.set_xlabel("Rollouts (N)")
ax.set_ylabel("Gradient SNR")
ax.set_title(f"Gradient SNR vs Rollouts (S={S} gradient samples)")
ax.legend()
ax.grid(True, which="both", linestyle="--", alpha=0.5)

plt.tight_layout()
plt.savefig("snr.png", dpi=150)
print("Saved plot to snr.png")