import pytest
import torch
from lightning import Fabric

from litgpt.utils import _RunIf


@_RunIf(min_cuda_gpus=2, standalone=True)
@pytest.mark.parametrize("strategy", ["ddp", "fsdp"])
def test_no_backward_sync(strategy):
    fabric = Fabric(devices=2, accelerator="cuda", strategy=strategy)
    fabric.launch()

    # FSDP shards the weight across ranks, so give the layer one output feature
    # per rank; with DDP the single replicated weight is enough.
    out_features = 1 if "ddp" in strategy else fabric.world_size

    model = torch.nn.Linear(1, out_features, bias=False, device=fabric.device)
    x = torch.randn(1, 1, device=fabric.device)
    model = fabric.setup(model)

    # Skip the gradient sync for two steps, then let the third backward
    # synchronize the locally accumulated gradients.
    for i, enabled in enumerate((True, True, False, True, True, False), 1):
        x = torch.tensor([i * (fabric.local_rank + 1)], device=fabric.device, dtype=torch.float32)

        with fabric.no_backward_sync(model, enabled):
            y = model(x)
            fabric.backward(y.sum())
        if not enabled:
            # Each backward contributes grad = x. Ranks accumulate locally while
            # sync is skipped, and the synced backward averages across ranks:
            # i == 3: ((1 + 2 + 3) + (2 + 4 + 6)) / 2 = 9.0
            # i == 6: ((4 + 5 + 6) + (8 + 10 + 12)) / 2 = 22.5
            assert model.weight.grad.shape.numel() == 1, model.weight.grad.shape
            assert model.weight.grad.item() == (9.0 if i == 3 else 22.5)
            model.weight.grad = None