| python_code (string, 0-992k chars) | repo_name (string, 8-46 chars) | file_path (string, 5-162 chars) |
|---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torch import nn
from torchmultimodal.models.flava.image_encoder import ImageEmbeddings, ImageTransformer
from torchmultimodal.modules.layers.transformer import TransformerEncoder
@pytest.fixture(autouse=True)
def random():
set_rng_seed(0)
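# Smoke tests for a tiny FLAVA image encoder: 2x2 images split into 1x1 patches with
# hidden size 2, checked against fixed golden values produced under the seed above.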
class TestFlavaImageEncoder:
@pytest.fixture
def image_encoder_components(self):
image_embedding = ImageEmbeddings(image_size=2, patch_size=1, hidden_size=2)
encoder = TransformerEncoder(
n_layer=1,
d_model=2,
n_head=1,
dim_feedforward=1,
activation=nn.GELU,
norm_first=True,
)
image_encoder = ImageTransformer(
embeddings=image_embedding,
encoder=encoder,
layernorm=nn.LayerNorm(2),
pooler=nn.Identity(),
)
return image_encoder, image_embedding
@pytest.fixture
def input(self):
return torch.ones(2, 3, 2, 2)
def test_embedding(self, image_encoder_components, input):
_, image_embedding = image_encoder_components
out = image_embedding(input)
assert_expected(
out,
torch.Tensor(
[
[
[0.0000, 0.0000],
[0.0224, 0.0573],
[0.0224, 0.0573],
[0.0224, 0.0573],
[0.0224, 0.0573],
],
[
[0.0000, 0.0000],
[0.0224, 0.0573],
[0.0224, 0.0573],
[0.0224, 0.0573],
[0.0224, 0.0573],
],
]
),
atol=1e-4,
rtol=0,
)
def test_image_encoder(self, image_encoder_components, input):
image_encoder, _ = image_encoder_components
out = image_encoder(input)
assert_expected(
out.last_hidden_state,
torch.Tensor(
[
[
[-0.0040, 0.0040],
[-0.9840, 0.9840],
[-0.9840, 0.9840],
[-0.9840, 0.9840],
[-0.9840, 0.9840],
],
[
[-0.0040, 0.0040],
[-0.9840, 0.9840],
[-0.9840, 0.9840],
[-0.9840, 0.9840],
[-0.9840, 0.9840],
],
]
),
atol=1e-4,
rtol=0,
)
assert_expected(out.pooler_output, out.last_hidden_state)
assert_expected(
out.hidden_states,
(
torch.Tensor(
[
[
[0.0000, 0.0000],
[0.0224, 0.0573],
[0.0224, 0.0573],
[0.0224, 0.0573],
[0.0224, 0.0573],
],
[
[0.0000, 0.0000],
[0.0224, 0.0573],
[0.0224, 0.0573],
[0.0224, 0.0573],
[0.0224, 0.0573],
],
]
),
torch.Tensor(
[
[
[0.0008, 0.0008],
[0.0232, 0.0581],
[0.0232, 0.0581],
[0.0232, 0.0581],
[0.0232, 0.0581],
],
[
[0.0008, 0.0008],
[0.0232, 0.0581],
[0.0232, 0.0581],
[0.0232, 0.0581],
[0.0232, 0.0581],
],
]
),
),
atol=1e-4,
rtol=0,
)
assert_expected(
out.attentions,
(
torch.Tensor(
[
[
[
[0.2000, 0.2000, 0.2000, 0.2000, 0.2000],
[0.1999, 0.2000, 0.2000, 0.2000, 0.2000],
[0.1999, 0.2000, 0.2000, 0.2000, 0.2000],
[0.1999, 0.2000, 0.2000, 0.2000, 0.2000],
[0.1999, 0.2000, 0.2000, 0.2000, 0.2000],
]
],
[
[
[0.2000, 0.2000, 0.2000, 0.2000, 0.2000],
[0.1999, 0.2000, 0.2000, 0.2000, 0.2000],
[0.1999, 0.2000, 0.2000, 0.2000, 0.2000],
[0.1999, 0.2000, 0.2000, 0.2000, 0.2000],
[0.1999, 0.2000, 0.2000, 0.2000, 0.2000],
]
],
]
),
),
atol=1e-4,
rtol=0,
)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/flava/test_image_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from math import inf
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.models.clip.text_encoder import CLIPTextEncoder
class TestCLIPTextEncoder:
@pytest.fixture
def start(self):
set_rng_seed(1234)
context_length = 77
batch_size, embedding_dim, heads = 2, 4, 2
def build_text(text_length):
return torch.randint(1, 10, (batch_size, text_length), dtype=torch.long)
def build_encoder(
embedding_dim=embedding_dim,
use_clip_init=True,
context_length=context_length,
heads=heads,
):
return CLIPTextEncoder(
embedding_dim=embedding_dim,
use_clip_init=use_clip_init,
context_length=context_length,
heads=heads,
)
return build_encoder, build_text
def test_clip_parameters(self, start):
build_encoder, _ = start
# Use larger embedding size for stability in std
text_encoder = build_encoder(embedding_dim=50)
assert_expected(
actual=torch.std(text_encoder.token_embedding.weight).item(),
expected=0.02,
rtol=0,
atol=1e-4,
)
assert_expected(
actual=torch.std(text_encoder.positional_embedding).item(),
expected=0.01,
rtol=0,
atol=1e-3,
)
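        # The expected stds are consistent with the CLIP init scheme for the encoder's
        # default width (512) and layer count (12): attn_std = width**-0.5 ~= 0.0442,
        # proj_std = attn_std * (2 * layers)**-0.5 ~= 0.0090, fc_std = (2 * width)**-0.5 ~= 0.0313.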
proj_std = 0.0090
attn_std = 0.0442
fc_std = 0.0313
for layer in text_encoder.encoder.layers:
assert_expected(
actual=torch.std(layer.self_attn.in_proj_weight).item(),
expected=attn_std,
rtol=0,
atol=5e-3,
)
assert_expected(
actual=torch.std(layer.self_attn.out_proj.weight).item(),
expected=proj_std,
rtol=0,
atol=5e-3,
)
assert_expected(
actual=torch.std(layer.linear1.weight).item(),
expected=fc_std,
rtol=0,
atol=5e-3,
)
assert_expected(
actual=torch.std(layer.linear2.weight).item(),
expected=proj_std,
rtol=0,
atol=5e-3,
)
assert_expected(
actual=torch.std(text_encoder.projection.weight).item(),
expected=attn_std,
rtol=0,
atol=5e-3,
)
def test_attention_mask(self, start):
build_encoder, _ = start
text_encoder = build_encoder(context_length=4)
assert isinstance(text_encoder, torch.nn.Module)
actual = text_encoder.build_attention_mask()
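        # The mask is causal: 0 on and below the diagonal, -inf above it, so token i can only
        # attend to positions <= i (equivalent to torch.full((4, 4), float("-inf")).triu(diagonal=1)).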
expected = torch.Tensor(
[[0, -inf, -inf, -inf], [0, 0, -inf, -inf], [0, 0, 0, -inf], [0, 0, 0, 0]]
)
assert_expected(actual=actual, expected=expected, rtol=0, atol=0)
def test_forward(self, start):
build_encoder, build_text = start
text = build_text(text_length=77)
text_encoder = build_encoder()
assert isinstance(text_encoder, torch.nn.Module)
actual_clip_init = text_encoder(text)
expected_clip_init = torch.Tensor(
[[-1.3103, -0.6713, -0.9614, 0.7010], [1.1780, 0.1888, 0.8019, 0.7287]]
)
assert_expected(
actual=actual_clip_init, expected=expected_clip_init, rtol=0, atol=1e-4
)
def test_forward_over_context_length(self, start):
build_encoder, build_text = start
text_encoder = build_encoder()
assert isinstance(text_encoder, torch.nn.Module)
text = build_text(text_encoder.context_length + 1)
with pytest.raises(ValueError):
text_encoder(text)
def test_scripting(self, start):
build_encoder, build_text = start
text = build_text(text_length=77)
text_encoder = build_encoder()
assert isinstance(text_encoder, torch.nn.Module)
scripted_encoder = torch.jit.script(text_encoder)
actual = scripted_encoder(text)
expected = torch.Tensor(
[[-1.3103, -0.6713, -0.9614, 0.7010], [1.1780, 0.1888, 0.8019, 0.7287]]
)
assert_expected(actual=actual, expected=expected, rtol=0, atol=1e-4)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/clip/test_text_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/tests/models/clip/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.models.clip.image_encoder import ResNetForCLIP
from torchmultimodal.models.clip.model import CLIP
from torchmultimodal.models.clip.text_encoder import CLIPTextEncoder
from torchvision.models.vision_transformer import VisionTransformer
@pytest.fixture(autouse=True)
def random():
set_rng_seed(1234)
class TestCLIP:
@pytest.fixture(scope="class")
def context_length(self):
return 77
def test_clip_forward(self):
encoder_a = torch.nn.Linear(5, 3)
encoder_b = torch.nn.Linear(4, 3)
clip = CLIP(encoder_a, encoder_b)
input_a = torch.randint(1, 8, (2, 5), dtype=torch.float)
input_b = torch.randint(1, 8, (2, 4), dtype=torch.float)
assert isinstance(clip, torch.nn.Module)
out = clip(input_a, input_b)
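        # CLIP normalizes each projected embedding to unit length, so every expected row
        # below has L2 norm 1.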
assert (
hasattr(out, "embeddings_a")
and hasattr(out, "embeddings_b")
and len(out) == 2
)
actual_a_embedding = out.embeddings_a
actual_b_embedding = out.embeddings_b
expected_a_embedding = torch.Tensor(
[[-0.8066, -0.1749, 0.5647], [-0.7709, -0.1118, 0.6271]]
)
expected_b_embedding = torch.Tensor(
[[-0.1719, 0.7932, 0.5842], [-0.2805, 0.8761, -0.3921]]
)
assert_expected(
actual=actual_a_embedding, expected=expected_a_embedding, rtol=0, atol=1e-4
)
assert_expected(
actual=actual_b_embedding, expected=expected_b_embedding, rtol=0, atol=1e-4
)
def test_clip_resnet_forward(self, context_length):
resnet_encoder = ResNetForCLIP(
layers=(3, 4, 6, 3),
output_dim=12,
heads=10,
width=20,
)
text_encoder = CLIPTextEncoder(
embedding_dim=12,
context_length=context_length,
vocab_size=100,
width=512,
heads=8,
layers=12,
)
clip_resnet = CLIP(
encoder_a=resnet_encoder,
encoder_b=text_encoder,
)
assert isinstance(clip_resnet, torch.nn.Module)
text = torch.randint(1, 79, (context_length,), dtype=torch.long).unsqueeze(0)
image = torch.randn(3, 224, 224).unsqueeze(0)
clip_resnet_scores = clip_resnet(features_a=image, features_b=text)
assert_expected(
torch.tensor(clip_resnet_scores.embeddings_a.size()), torch.tensor((1, 12))
)
assert_expected(
torch.tensor(clip_resnet_scores.embeddings_b.size()), torch.tensor((1, 12))
)
def test_clip_vit_forward(self, context_length):
vit_encoder = VisionTransformer(
image_size=224,
patch_size=16,
num_layers=12,
num_heads=12,
hidden_dim=768,
mlp_dim=3072,
num_classes=12,
)
text_encoder = CLIPTextEncoder(
embedding_dim=12,
context_length=context_length,
vocab_size=100,
width=512,
heads=8,
layers=12,
)
text = torch.randint(1, 79, (context_length,), dtype=torch.long).unsqueeze(0)
image = torch.randn(3, 224, 224).unsqueeze(0)
clip_vit = CLIP(encoder_a=vit_encoder, encoder_b=text_encoder)
assert isinstance(clip_vit, torch.nn.Module)
clip_vit_scores = clip_vit(features_a=image, features_b=text)
assert_expected(
torch.tensor(clip_vit_scores.embeddings_a.size()), torch.tensor((1, 12))
)
assert_expected(
torch.tensor(clip_vit_scores.embeddings_b.size()), torch.tensor((1, 12))
)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/clip/test_clip.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.models.clip import model as pretrained
class TestCLIPCheckpoint:
@pytest.fixture(scope="class")
def data(self):
set_rng_seed(0)
image224 = torch.randn(1, 3, 224, 224)
image288 = torch.randn(1, 3, 288, 288)
image384 = torch.randn(1, 3, 384, 384)
image448 = torch.randn(1, 3, 448, 448)
text = torch.randint(0, 49408, (1, 77))
return text, image224, image288, image384, image448
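    # Each checkpoint expects a specific input resolution (224 for the ViT/RN50/RN101 variants,
    # 288 for RN50x4, 384 for RN50x16, 448 for RN50x64) and produces a variant-specific
    # embedding width, checked by the size assertions below.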
def test_clip_vit_b16(self, data):
text, image224, *_ = data
model = pretrained.clip_vit_b16(True)
model.eval()
with torch.no_grad():
actual_a_embedding, actual_b_embedding = model(image224, text)
assert_expected(
actual=actual_a_embedding.mean(),
expected=torch.tensor(0.0030),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=actual_b_embedding.mean(),
expected=torch.tensor(0.0023),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=torch.tensor(actual_a_embedding.size()),
expected=torch.tensor((1, 512)),
)
assert_expected(
actual=torch.tensor(actual_b_embedding.size()),
expected=torch.tensor((1, 512)),
)
def test_clip_vit_b32(self, data):
text, image224, *_ = data
model = pretrained.clip_vit_b32(True)
model.eval()
with torch.no_grad():
actual_a_embedding, actual_b_embedding = model(image224, text)
assert_expected(
actual=actual_a_embedding.mean(),
expected=torch.tensor(-0.0014),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=actual_b_embedding.mean(),
expected=torch.tensor(-0.0041),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=torch.tensor(actual_a_embedding.size()),
expected=torch.tensor((1, 512)),
)
assert_expected(
actual=torch.tensor(actual_b_embedding.size()),
expected=torch.tensor((1, 512)),
)
def test_clip_vit_l14(self, data):
text, image224, *_ = data
model = pretrained.clip_vit_l14(True)
model.eval()
with torch.no_grad():
actual_a_embedding, actual_b_embedding = model(image224, text)
assert_expected(
actual=actual_a_embedding.mean(),
expected=torch.tensor(0.0006),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=actual_b_embedding.mean(),
expected=torch.tensor(-0.0022),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=torch.tensor(actual_a_embedding.size()),
expected=torch.tensor((1, 768)),
)
assert_expected(
actual=torch.tensor(actual_b_embedding.size()),
expected=torch.tensor((1, 768)),
)
def test_clip_rn50(self, data):
text, image224, *_ = data
model = pretrained.clip_rn50(True)
model.eval()
with torch.no_grad():
actual_a_embedding, actual_b_embedding = model(image224, text)
assert_expected(
actual=actual_a_embedding.mean(),
expected=torch.tensor(-0.0012),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=actual_b_embedding.mean(),
expected=torch.tensor(-0.0001),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=torch.tensor(actual_a_embedding.size()),
expected=torch.tensor((1, 1024)),
)
assert_expected(
actual=torch.tensor(actual_b_embedding.size()),
expected=torch.tensor((1, 1024)),
)
def test_clip_rn101(self, data):
text, image224, *_ = data
model = pretrained.clip_rn101(True)
model.eval()
with torch.no_grad():
actual_a_embedding, actual_b_embedding = model(image224, text)
assert_expected(
actual=actual_a_embedding.mean(),
expected=torch.tensor(-0.0012),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=actual_b_embedding.mean(),
expected=torch.tensor(-0.0017),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=torch.tensor(actual_a_embedding.size()),
expected=torch.tensor((1, 512)),
)
assert_expected(
actual=torch.tensor(actual_b_embedding.size()),
expected=torch.tensor((1, 512)),
)
def test_clip_rn50x4(self, data):
text, _, image288, *_ = data
model = pretrained.clip_rn50x4(True)
model.eval()
with torch.no_grad():
actual_a_embedding, actual_b_embedding = model(image288, text)
assert_expected(
actual=actual_a_embedding.mean(),
expected=torch.tensor(0.0006),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=actual_b_embedding.mean(),
expected=torch.tensor(0.0002),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=torch.tensor(actual_a_embedding.size()),
expected=torch.tensor((1, 640)),
)
assert_expected(
actual=torch.tensor(actual_b_embedding.size()),
expected=torch.tensor((1, 640)),
)
def test_clip_rn50x16(self, data):
text, *_, image384, _ = data
model = pretrained.clip_rn50x16(True)
model.eval()
with torch.no_grad():
actual_a_embedding, actual_b_embedding = model(image384, text)
assert_expected(
actual=actual_a_embedding.mean(),
expected=torch.tensor(0.0017),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=actual_b_embedding.mean(),
expected=torch.tensor(0.0012),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=torch.tensor(actual_a_embedding.size()),
expected=torch.tensor((1, 768)),
)
assert_expected(
actual=torch.tensor(actual_b_embedding.size()),
expected=torch.tensor((1, 768)),
)
def test_clip_rn50x64(self, data):
text, *_, image448 = data
model = pretrained.clip_rn50x64(True)
model.eval()
with torch.no_grad():
actual_a_embedding, actual_b_embedding = model(image448, text)
assert_expected(
actual=actual_a_embedding.mean(),
expected=torch.tensor(0.0004),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=actual_b_embedding.mean(),
expected=torch.tensor(-0.0004),
rtol=0,
atol=1e-4,
)
assert_expected(
actual=torch.tensor(actual_a_embedding.size()),
expected=torch.tensor((1, 1024)),
)
assert_expected(
actual=torch.tensor(actual_b_embedding.size()),
expected=torch.tensor((1, 1024)),
)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/clip/test_checkpoint.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torch import Tensor
from torchmultimodal.models.clip.image_encoder import CLIPViTEncoder, ResNetForCLIP
from torchmultimodal.utils.common import get_current_device
@pytest.fixture(autouse=True)
def set_seed():
set_rng_seed(1234)
@pytest.fixture
def device():
return get_current_device()
class TestResnetEncoder:
def test_resnet(self, device):
resnet = ResNetForCLIP(
layers=(3, 4, 6, 3),
output_dim=512,
heads=1024,
)
assert isinstance(resnet, torch.nn.Module)
image = torch.randn(3, 224, 224).unsqueeze(0)
resnet = resnet.to(device)
scores = resnet(image)
assert_expected(actual=scores.size(), expected=torch.Size((1, 512)))
assert_expected(actual=scores.sum().item(), expected=2.1351, rtol=0, atol=1e-3)
class TestCLIPViTEncoder:
@pytest.fixture(autouse=True)
def clip_vit_encoder(self):
set_rng_seed(0)
encoder = CLIPViTEncoder(
embedding_dim=4,
heads=2,
layers=1,
patch_size=2,
image_size=16,
width=2,
)
encoder.eval()
return encoder
def test_forward(self, clip_vit_encoder):
input = torch.ones(2, 3, 16, 16)
out = clip_vit_encoder(input)
expected = Tensor(
[[1.1296, -0.6523, 0.3949, -0.7351], [1.1296, -0.6523, 0.3949, -0.7351]]
)
assert_expected(expected, out, atol=1e-4, rtol=0)
def test_invalid_input(self, clip_vit_encoder):
input = torch.ones(2, 3, 5, 5)
with pytest.raises(ValueError):
clip_vit_encoder(input)
input = torch.ones(2, 2, 16, 16)
with pytest.raises(ValueError):
clip_vit_encoder(input)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/clip/test_image_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torch import nn, Tensor
from torchmultimodal.models.albef.image_encoder import ALBEFVisionEncoder
from torchmultimodal.models.albef.model import (
ALBEFModel,
ALBEFModelWithSimilarity,
ALBEFSimilarity,
)
from torchmultimodal.models.albef.multimodal_encoder import ALBEFMultimodalEncoder
from torchmultimodal.modules.encoders.bert_text_encoder import bert_text_encoder
from torchmultimodal.utils.common import momentum_update, remove_grad
@pytest.fixture(autouse=True)
def random():
set_rng_seed(0)
@pytest.fixture
def vision_encoder():
return ALBEFVisionEncoder(
image_size=4,
patch_size=4,
num_hidden_layers=2,
num_attention_heads=1,
hidden_size=3,
mlp_dim=6,
)
@pytest.fixture
def text_transformer():
return bert_text_encoder(hidden_size=3, num_attention_heads=1, dropout=0.0)
@pytest.fixture
def multimodal_encoder():
return ALBEFMultimodalEncoder(hidden_size=3, num_attention_heads=1)
@pytest.fixture
def albef_model(vision_encoder, text_transformer, multimodal_encoder):
return ALBEFModel(
vision_encoder,
text_transformer,
multimodal_encoder,
)
@pytest.fixture
def albef_with_sim(albef_model):
return ALBEFModelWithSimilarity(
albef_model,
nn.Linear(3, 2),
nn.Linear(3, 2),
embed_size=2,
queue_size=4,
)
@pytest.fixture
def albef_model_output(albef_model):
image = torch.randn(2, 3, 4, 4)
text = torch.randint(10, (2, 2))
text_atts = Tensor([[1, 1], [1, 0]])
return albef_model(image, text, text_atts)
def test_albef_image_embeddings(albef_model_output):
expected = Tensor(
[
[[1.364883, -1.003092, -0.361791], [-0.634884, 1.411830, -0.776947]],
[[1.401580, -0.537510, -0.864071], [1.378901, -0.417473, -0.961429]],
]
)
assert_expected(albef_model_output.image_embeddings, expected, rtol=0, atol=1e-4)
def test_albef_image_embeddings_momentum(albef_model_output):
expected = Tensor(
[
[[1.364883, -1.003092, -0.361791], [-0.634884, 1.411830, -0.776947]],
[[1.401580, -0.537510, -0.864070], [1.378902, -0.417473, -0.961429]],
]
)
assert_expected(albef_model_output.image_embeddings_m, expected, rtol=0, atol=1e-4)
def test_albef_text_embeddings(albef_model_output):
expected = Tensor(
[
[[-0.317956, 1.352367, -1.034411], [1.064044, -1.338780, 0.274735]],
[[-1.320019, 0.220507, 1.099512], [1.411497, -0.781628, -0.629869]],
]
)
assert_expected(albef_model_output.text_embeddings, expected, rtol=0, atol=1e-4)
def test_albef_text_embeddings_momentum(albef_model_output):
expected = Tensor(
[
[[-0.317956, 1.352367, -1.034411], [1.064044, -1.338780, 0.274735]],
[[-1.320019, 0.220507, 1.099512], [1.411497, -0.781628, -0.629869]],
]
)
assert_expected(albef_model_output.text_embeddings_m, expected, rtol=0, atol=1e-4)
def test_albef_multimodal_embeddings(albef_model_output):
expected = Tensor(
[
[[-0.068738, 1.257666, -1.188928], [1.409873, -0.609056, -0.800817]],
[[-1.402520, 0.544084, 0.858435], [1.202279, -1.246038, 0.043760]],
]
)
assert_expected(
albef_model_output.multimodal_embeddings, expected, rtol=0, atol=1e-4
)
def test_albef_multimodal_embeddings_momentum(albef_model_output):
expected = Tensor(
[
[[-0.068738, 1.257666, -1.188928], [1.409873, -0.609056, -0.800817]],
[[-1.402520, 0.544084, 0.858435], [1.202279, -1.246038, 0.043760]],
]
)
assert_expected(
albef_model_output.multimodal_embeddings_m, expected, rtol=0, atol=1e-4
)
def test_copy_params_momentum_models():
model = nn.Linear(3, 2)
model_m = copy.deepcopy(model)
remove_grad(model_m)
for param, param_m in zip(model.parameters(), model_m.parameters()):
assert_expected(param, param_m, rtol=0, atol=1e-4)
assert not param_m.requires_grad
def test_dequeue_and_enqueue(albef_with_sim):
image_feat_m = torch.randn(2, 2)
text_feat_m = torch.randn(2, 2)
idx = Tensor([[2], [1]]).type(torch.long)
albef_with_sim._dequeue_and_enqueue(image_feat_m, text_feat_m, idx)
assert_expected(
albef_with_sim.image_queue[:, 0:2],
image_feat_m.T,
rtol=0,
atol=1e-4,
)
assert_expected(albef_with_sim.text_queue[:, 0:2], text_feat_m.T, rtol=0, atol=1e-4)
assert_expected(albef_with_sim.idx_queue[:, 0:2], idx.T, rtol=0, atol=1e-4)
def test_momentum_update():
init_weight = Tensor([[1, 2, 3], [4, 5, 6]])
init_weight_m = Tensor([[6, 5, 4], [3, 2, 1]])
model = nn.Linear(3, 2)
model_m = nn.Linear(3, 2)
model.weight = nn.Parameter(init_weight)
model_m.weight = nn.Parameter(init_weight_m)
momentum_update(model, model_m, 0.75)
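    # With momentum 0.75 the momentum weights become 0.75 * old_m + 0.25 * online,
    # e.g. 0.75 * 6 + 0.25 * 1 = 4.75 for the first entry; the online model is unchanged.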
expected_weight_m = Tensor([[4.75, 4.25, 3.75], [3.25, 2.75, 2.25]])
assert_expected(model.weight, init_weight, rtol=0, atol=1e-4)
assert_expected(model_m.weight, expected_weight_m, rtol=0, atol=1e-4)
def test_similarity(albef_with_sim):
albef_with_sim.image_queue = torch.randn(2, 4)
albef_with_sim.text_queue = torch.randn(2, 4)
image_embeds = torch.randn(2, 5, 3)
image_embeds_m = torch.randn(2, 5, 3)
text_embeds = torch.randn(2, 7, 3)
text_embeds_m = torch.randn(2, 7, 3)
idx = Tensor([[2], [1]]).type(torch.long)
output = albef_with_sim._similarity(
image_embeds, image_embeds_m, text_embeds, text_embeds_m, idx
)
expected_sim_i2t = Tensor(
[
[-5.128132, -13.669198, -2.814691, 7.166637, 19.930466, 20.275330],
[9.302484, 11.485555, -5.828896, -7.156259, -17.247587, -26.397799],
]
)
expected_sim_t2i = Tensor(
[
[12.8447, 13.8292, -15.2739, -20.3898, 26.4407, 17.8609],
[-12.8771, -11.3956, 25.1225, 14.7973, -3.5396, 7.2677],
]
)
expected_sim_i2t_m = Tensor(
[
[2.0358, -13.9559, -14.8056, 5.6649, 19.6189, 7.0686],
[4.7981, -13.0741, -18.6137, 4.6502, 18.0892, 1.2024],
]
)
expected_sim_t2i_m = Tensor(
[
[2.0358, 4.7981, 7.9365, -9.1906, 28.4402, 29.4093],
[-13.9559, -13.0741, 24.3506, 17.6918, -10.5707, 0.4952],
]
)
assert_expected(output.sim_i2t, expected_sim_i2t, rtol=0, atol=1e-4)
assert_expected(output.sim_t2i, expected_sim_t2i, rtol=0, atol=1e-4)
assert_expected(output.sim_i2t_m, expected_sim_i2t_m, rtol=0, atol=1e-4)
assert_expected(output.sim_t2i_m, expected_sim_t2i_m, rtol=0, atol=1e-4)
def test_neg_embeddings(albef_with_sim):
image_embeds = torch.randn(2, 1, 3)
text_embeds = torch.randn(2, 1, 3)
text_atts = torch.randn(2, 1)
similarity = ALBEFSimilarity(
sim_i2t=torch.randn(2, 5),
sim_t2i=torch.randn(2, 5),
sim_i2t_m=torch.randn(2, 5),
sim_t2i_m=torch.randn(2, 5),
)
image_embeds_neg, text_embeds_neg, text_atts_neg = albef_with_sim._neg_embeddings(
image_embeds, text_embeds, text_atts, similarity
)
expected_image_embeds_neg = Tensor(
[[[1.917750, 1.748151, 0.901075]], [[-0.193372, -1.123208, 2.178921]]]
)
expected_text_embeds_neg = Tensor(
[[[-0.0520, 0.4082, -1.4286]], [[0.0278, 0.7572, -1.7793]]]
)
expected_text_atts_neg = Tensor([[-0.5061], [0.0827]])
assert_expected(image_embeds_neg, expected_image_embeds_neg, rtol=0, atol=1e-4)
assert_expected(text_embeds_neg, expected_text_embeds_neg, rtol=0, atol=1e-4)
assert_expected(text_atts_neg, expected_text_atts_neg, rtol=0, atol=1e-4)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/albef/test_albef.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/tests/models/albef/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torch import Tensor
from torchmultimodal.models.albef.multimodal_encoder import ALBEFMultimodalEncoder
@pytest.fixture(autouse=True)
def multimodal_encoder():
set_rng_seed(0)
return ALBEFMultimodalEncoder(hidden_size=3, num_attention_heads=1)
def test_multimodal_encoder(multimodal_encoder):
image_embeds = torch.randn(2, 2, 3)
text_embeds = torch.randn(2, 2, 3)
text_atts = torch.Tensor([[1, 1], [1, 0]])
output = multimodal_encoder(text_embeds, text_atts, image_embeds)
expected = Tensor(
[
[[0.794870, 0.615549, -1.410419], [1.314163, -1.109555, -0.204607]],
[[-0.862034, -0.539896, 1.401930], [-1.176761, -0.090902, 1.267663]],
]
)
assert_expected(output, expected, rtol=0, atol=1e-4)
def test_invalid_image_hidden_size(multimodal_encoder):
image_embeds = torch.randn(2, 2, 4)
text_embeds = torch.randn(2, 2, 3)
text_atts = torch.Tensor([[1, 1], [1, 0]])
with pytest.raises(RuntimeError):
        multimodal_encoder(text_embeds, text_atts, image_embeds)
def test_invalid_text_hidden_size(multimodal_encoder):
image_embeds = torch.randn(2, 2, 3)
text_embeds = torch.randn(2, 2, 4)
text_atts = torch.Tensor([[1, 1], [1, 0]])
with pytest.raises(RuntimeError):
        multimodal_encoder(text_embeds, text_atts, image_embeds)
def test_not_matching_input_batch_size(multimodal_encoder):
image_embeds = torch.randn(2, 2, 3)
text_embeds = torch.randn(3, 2, 3)
text_atts = torch.Tensor([[1, 1], [1, 0], [1, 1]])
with pytest.raises(RuntimeError):
        multimodal_encoder(text_embeds, text_atts, image_embeds)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/albef/test_multimodal_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torch import Tensor
from torchmultimodal.models.albef.image_encoder import ALBEFVisionEncoder
class TestALBEFVisionEncoder:
set_rng_seed(0)
torch.set_printoptions(precision=6)
vision_encoder = ALBEFVisionEncoder(
image_size=4,
patch_size=4,
num_hidden_layers=2,
num_attention_heads=1,
hidden_size=3,
mlp_dim=6,
)
def test_vision_transformer(self):
set_rng_seed(0)
vit = self.vision_encoder
input = torch.randn(1, 3, 4, 4)
output = vit(input)
expected = Tensor(
[
[1.399478, -0.875986, -0.523492],
[-0.869867, 1.400589, -0.530722],
]
).unsqueeze(0)
assert_expected(output, expected, rtol=0, atol=1e-4)
def test_invalid_input_length(self):
input = torch.randn(3, 4, 4)
with pytest.raises(IndexError, match="index out of range"):
self.vision_encoder(input)
def test_invalid_image_channel_dim(self):
input = torch.rand(1, 1, 4, 4)
with pytest.raises(RuntimeError, match="channels"):
self.vision_encoder(input)
def test_invalid_image_height(self):
input = torch.rand(1, 3, 5, 4)
with pytest.raises(AssertionError, match="Wrong image height!"):
self.vision_encoder(input)
def test_invalid_image_width(self):
input = torch.rand(1, 3, 4, 3)
with pytest.raises(AssertionError, match="Wrong image width!"):
self.vision_encoder(input)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/albef/test_image_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.models.mdetr.text_encoder import ModifiedTransformerEncoder
@pytest.fixture(autouse=True)
def random():
set_rng_seed(0)
class TestModifiedTransformerEncoder:
@pytest.fixture
def hidden_size(self):
return 768
@pytest.fixture
def batch_size(self):
return 2
@pytest.fixture
def input_length(self):
return 16
@pytest.fixture
def encoder_input(self, batch_size, input_length, hidden_size):
return torch.rand((batch_size, input_length, hidden_size))
@pytest.fixture
def attention_mask(self, batch_size, input_length):
return torch.randint(0, 2, (batch_size, input_length), dtype=bool)
@pytest.fixture
def encoder(self, hidden_size):
return ModifiedTransformerEncoder(
embedding_dim=hidden_size,
ffn_dimension=3072,
num_attention_heads=12,
num_encoder_layers=12,
dropout=0.1,
normalize_before=False,
)
def test_mdetr_modified_transformer(
self,
batch_size,
input_length,
hidden_size,
encoder_input,
attention_mask,
encoder,
):
expected = torch.Tensor(
[
0.6401,
0.2591,
0.7217,
0.5619,
0.3337,
0.2425,
0.3801,
0.3394,
0.2731,
0.2023,
0.2436,
0.1918,
0.6731,
0.3916,
0.5608,
0.1991,
]
)
out = encoder(encoder_input, attention_mask)
actual = out.last_hidden_state[1, :, 1]
assert_expected(
out.last_hidden_state.size(),
torch.Size((batch_size, input_length, hidden_size)),
)
assert_expected(actual, expected, rtol=0.0, atol=1e-4)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/mdetr/test_text_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.models.mdetr.model import (
mdetr_for_phrase_grounding,
mdetr_for_vqa,
mdetr_gqa_heads,
mdetr_resnet101,
)
class TestMDETR:
@pytest.fixture(autouse=True)
def rng(self):
set_rng_seed(0)
@pytest.fixture(autouse=True)
def batch_size(self):
return 2
@pytest.fixture(autouse=True)
def num_queries(self):
return 100
@pytest.fixture(autouse=True)
def num_classes(self):
return 255
@pytest.fixture()
def test_tensors(self):
return torch.rand(2, 3, 64, 64).unbind(dim=0)
@pytest.fixture()
def input_ids(self):
        return torch.Tensor(
            [
                [0, 100, 64, 192, 5, 3778, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [0, 1708, 190, 114, 38, 1395, 192, 5, 3778, 6, 38, 216, 14, 24, 8785, 2],
            ]
        ).to(dtype=torch.long)
@pytest.fixture()
def mdetr(self, num_queries, num_classes):
mdetr = mdetr_resnet101(num_queries=num_queries, num_classes=num_classes)
mdetr.eval()
return mdetr
def test_mdetr_model(
self,
mdetr,
test_tensors,
input_ids,
batch_size,
num_queries,
num_classes,
):
out = mdetr(test_tensors, input_ids)
logits_actual = out.pred_logits
boxes_actual = out.pred_boxes
logits_expected = torch.Tensor(
[
-0.0145,
0.0121,
0.0270,
0.0310,
0.0072,
-0.0002,
0.0100,
0.0012,
0.0290,
0.0067,
]
)
boxes_expected = torch.Tensor(
[
0.4896,
0.4898,
0.4897,
0.4900,
0.4894,
0.4895,
0.4897,
0.4908,
0.4902,
0.4899,
]
)
assert logits_actual.size() == (
batch_size,
num_queries,
num_classes + 1,
)
assert boxes_actual.size() == (batch_size, num_queries, 4)
assert_expected(logits_actual[1, :10, 1], logits_expected, rtol=0, atol=1e-3)
assert_expected(boxes_actual[1, :10, 1], boxes_expected, rtol=0, atol=1e-3)
@pytest.fixture(autouse=True)
def contrastive_dim(self):
return 64
@pytest.fixture()
def mdetr_model_for_phrase_grounding(self, contrastive_dim):
return mdetr_for_phrase_grounding(contrastive_dim=contrastive_dim)
def test_mdetr_model_for_phrase_grounding(
self,
mdetr_model_for_phrase_grounding,
test_tensors,
input_ids,
batch_size,
num_queries,
contrastive_dim,
):
out = mdetr_model_for_phrase_grounding(test_tensors, input_ids)
logits_actual = out.model_output.pred_logits
boxes_actual = out.model_output.pred_boxes
logits_expected = torch.Tensor(
[
-0.1245,
-0.5103,
0.2710,
-0.2171,
-0.0561,
0.2635,
0.2804,
-0.0415,
0.2091,
0.0110,
]
)
boxes_expected = torch.Tensor(
[
0.4789,
0.4920,
0.4898,
0.4905,
0.4765,
0.4794,
0.4932,
0.4683,
0.4845,
0.4789,
]
)
assert_expected(logits_actual[1, :10, 1], logits_expected, rtol=0, atol=1e-3)
assert_expected(boxes_actual[1, :10, 1], boxes_expected, rtol=0, atol=1e-3)
query_embeddings_actual = out.contrastive_embeddings.query_embeddings
token_embeddings_actual = out.contrastive_embeddings.token_embeddings
query_embeddings_expected = torch.Tensor(
[
0.3083,
0.3146,
0.3221,
0.2411,
0.2673,
0.3152,
0.2798,
0.2321,
0.2433,
0.2321,
]
)
token_embeddings_expected = torch.Tensor(
[
0.2002,
0.1153,
0.1196,
0.2104,
0.1716,
0.1975,
0.1587,
0.1740,
0.1350,
0.1383,
]
)
assert query_embeddings_actual.size() == (
batch_size,
num_queries,
contrastive_dim,
)
assert token_embeddings_actual.size() == (
batch_size,
input_ids.size()[1],
contrastive_dim,
)
assert_expected(
query_embeddings_actual[1, :10, 1],
query_embeddings_expected,
rtol=0,
atol=1e-3,
)
assert_expected(
token_embeddings_actual[1, :10, 1],
token_embeddings_expected,
rtol=0,
atol=1e-3,
)
@pytest.fixture()
def mdetr_model_for_vqa(self):
model = mdetr_for_vqa(vqa_heads=mdetr_gqa_heads())
model.eval()
return model
def test_mdetr_model_for_vqa(
self,
mdetr_model_for_vqa,
test_tensors,
input_ids,
):
out = mdetr_model_for_vqa(test_tensors, input_ids)
logits_actual = out.model_output.pred_logits
boxes_actual = out.model_output.pred_boxes
logits_expected = torch.Tensor(
[
-0.7126,
-0.6930,
-0.6905,
-0.7025,
-0.6926,
-0.6882,
-0.7152,
-0.6839,
-0.6889,
-0.7048,
]
)
boxes_expected = torch.Tensor(
[
0.4744,
0.4744,
0.4738,
0.4745,
0.4739,
0.4739,
0.4748,
0.4736,
0.4744,
0.4751,
]
)
assert_expected(logits_actual[1, :10, 1], logits_expected, rtol=0, atol=1e-3)
assert_expected(boxes_actual[1, :10, 1], boxes_expected, rtol=0, atol=1e-3)
query_embeddings_actual = out.contrastive_embeddings.query_embeddings
token_embeddings_actual = out.contrastive_embeddings.token_embeddings
query_embeddings_expected = torch.Tensor(
[
-0.0871,
-0.0867,
-0.0875,
-0.0897,
-0.0884,
-0.0876,
-0.0861,
-0.0897,
-0.0858,
-0.0840,
]
)
token_embeddings_expected = torch.Tensor(
[
-0.2426,
-0.2365,
-0.2369,
-0.2321,
-0.2325,
-0.2430,
-0.2335,
-0.2370,
-0.2253,
-0.2358,
]
)
assert_expected(
query_embeddings_actual[1, :10, 1],
query_embeddings_expected,
rtol=0,
atol=1e-3,
)
assert_expected(
token_embeddings_actual[1, :10, 1],
token_embeddings_expected,
rtol=0,
atol=1e-3,
)
# Finally, check the vqa heads
answer_type_actual = out.vqa_preds["answer_type"]
answer_obj_actual = out.vqa_preds["answer_obj"]
answer_rel_actual = out.vqa_preds["answer_rel"]
answer_attr_actual = out.vqa_preds["answer_attr"]
answer_cat_actual = out.vqa_preds["answer_cat"]
answer_global_actual = out.vqa_preds["answer_global"]
answer_type_expected = torch.Tensor([0.0760, 0.4733, -1.0210, 0.1674, 0.0846])
answer_obj_expected = torch.Tensor([-0.3186, -0.0333, -0.5663])
answer_rel_expected = torch.Tensor(
[
0.2682,
0.2664,
0.1586,
-0.0358,
-1.3060,
0.0389,
0.7450,
-0.5142,
0.1172,
-0.7142,
]
)
answer_attr_expected = torch.Tensor(
[
0.1528,
0.4297,
0.2812,
0.2316,
0.1397,
1.8865,
0.1437,
-0.5668,
0.6351,
-0.6617,
]
)
answer_cat_expected = torch.Tensor(
[
-0.7169,
-0.5856,
0.3027,
-0.1104,
-0.9241,
0.7520,
0.6357,
1.6550,
-0.4437,
-0.2308,
]
)
answer_global_expected = torch.Tensor(
[
0.1416,
-0.1790,
0.6460,
0.5342,
-0.9321,
-0.7176,
0.8754,
0.1958,
-0.5698,
0.1433,
]
)
assert_expected(
answer_type_actual[0],
answer_type_expected,
rtol=0,
atol=1e-3,
)
assert_expected(
answer_obj_actual[0],
answer_obj_expected,
rtol=0,
atol=1e-3,
)
assert_expected(
answer_rel_actual[0, :10],
answer_rel_expected,
rtol=0,
atol=1e-3,
)
assert_expected(
answer_attr_actual[0, :10],
answer_attr_expected,
rtol=0,
atol=1e-3,
)
assert_expected(
answer_cat_actual[0, :10],
answer_cat_expected,
rtol=0,
atol=1e-3,
)
assert_expected(
answer_global_actual[0, :10],
answer_global_expected,
rtol=0,
atol=1e-3,
)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/mdetr/test_mdetr.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/tests/models/mdetr/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.models.mdetr.transformer import MDETRTransformer
class TestMDETRTransformer:
@pytest.fixture(autouse=True)
def rng(self):
set_rng_seed(0)
@pytest.fixture(autouse=True)
def batch_size(self):
return 2
@pytest.fixture(autouse=True)
def num_queries(self):
return 4
@pytest.fixture(autouse=True)
def mm_dim(self):
return 11
@pytest.fixture(autouse=True)
def embedding_dim(self):
return 256
@pytest.fixture(autouse=True)
def num_decoder_layers(self):
return 6
@pytest.fixture()
def src(self, mm_dim, batch_size, embedding_dim):
return torch.randn(mm_dim, batch_size, embedding_dim)
@pytest.fixture()
def src_key_padding_mask(self, batch_size, mm_dim):
return torch.randint(0, 2, (batch_size, mm_dim)).bool()
@pytest.fixture()
def pos(self, mm_dim, batch_size, embedding_dim):
return torch.randn(mm_dim, batch_size, embedding_dim)
@pytest.fixture()
def tgt(self, num_queries, batch_size, embedding_dim):
return torch.randn(num_queries, batch_size, embedding_dim)
@pytest.fixture()
def memory(self, mm_dim, batch_size, embedding_dim):
return torch.randn(mm_dim, batch_size, embedding_dim)
@pytest.fixture()
def memory_key_padding_mask(self, batch_size, mm_dim):
return torch.randint(0, 2, (batch_size, mm_dim)).bool()
@pytest.fixture()
def query_pos(self, num_queries, batch_size, embedding_dim):
return torch.randn(num_queries, batch_size, embedding_dim)
@pytest.fixture()
def transformer(self, embedding_dim, num_decoder_layers):
transformer = MDETRTransformer(
d_model=embedding_dim, num_decoder_layers=num_decoder_layers
)
transformer.eval()
return transformer
def test_transformer_encoder(
self,
transformer,
src,
src_key_padding_mask,
pos,
mm_dim,
batch_size,
embedding_dim,
):
actual = transformer.encoder(
src=src, src_key_padding_mask=src_key_padding_mask, pos=pos
)
assert actual.size() == (mm_dim, batch_size, embedding_dim)
expected = torch.Tensor([0.5081, 2.2849])
assert_expected(actual[1, :, 1], expected, rtol=0, atol=1e-3)
def test_transformer_decoder(
self,
transformer,
tgt,
memory,
memory_key_padding_mask,
pos,
query_pos,
num_decoder_layers,
num_queries,
batch_size,
embedding_dim,
):
actual = transformer.decoder(
tgt=tgt,
memory=memory,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos,
query_pos=query_pos,
)
assert actual.size() == (
num_decoder_layers,
num_queries,
batch_size,
embedding_dim,
)
expected = torch.Tensor(
[[-2.1366, 0.4760], [-2.1231, 0.4731], [-1.1372, 0.3629], [-1.2459, 0.1853]]
)
assert_expected(actual[1, :, :, 1], expected, rtol=0, atol=1e-3)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/mdetr/test_transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.models.mdetr.image_encoder import mdetr_resnet101_backbone
class TestMDETRImageEncoder(unittest.TestCase):
def setUp(self):
set_rng_seed(0)
self.test_tensor = torch.rand(4, 3, 64, 64)
self.mask = torch.zeros(4, 64, 64)
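        # An all-zeros mask marks every pixel as valid; ResNet-101's stride-32 backbone maps
        # the 4 x 3 x 64 x 64 input to a 4 x 2048 x 2 x 2 feature map (checked below).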
self.resnet101_encoder = mdetr_resnet101_backbone()
self.resnet101_encoder.eval()
def test_resnet_101_forward(self):
# Taken from [:, 2, :, :] of forward outputs from
# MDETR backbone with pretrained ImageNetV1 weights
expected = torch.Tensor(
[
[[0.4230, 0.9407], [0.8498, 0.5046]],
[[1.1702, 1.6584], [1.4689, 1.7062]],
[[1.3003, 1.7222], [2.2372, 1.8877]],
[[1.5309, 2.1169], [1.6040, 1.6911]],
]
)
# Get corresponding slice from last layer of outputs
out, _ = self.resnet101_encoder(self.test_tensor, self.mask)
actual = out[:, 2, :, :]
self.assertEqual(out.size(), (4, 2048, 2, 2))
assert_expected(actual, expected, rtol=0.0, atol=1e-4)
| EXA-1-master | exa/libraries/multimodal-main/tests/models/mdetr/test_image_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.transforms.video_transform import VideoTransform
class TestVideoTransform:
@pytest.fixture(autouse=True)
def set_seed(self):
set_rng_seed(1234)
@pytest.fixture
def utils(self, set_seed):
input_videos = 255 * torch.rand(size=(2, 6, 4, 5, 3)).to(dtype=float)
transform = VideoTransform(
time_samples=1,
mean=(0.5, 0.5, 0.5),
std=(0.2857, 0.2857, 0.2857),
resize_shape=(6, 7),
)
return transform, input_videos
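    # Inputs are (batch, time, height, width, channels) = (2, 6, 4, 5, 3); the transform samples
    # one frame, resizes to 6x7, normalizes, and returns (batch, channels, time, height, width).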
def test_call(self, utils):
transform, input_videos = utils
out = transform(input_videos)
assert_expected(actual=out.shape, expected=torch.Size([2, 3, 1, 6, 7]))
assert_expected(
actual=out.mean(), expected=torch.as_tensor(0.0), rtol=0, atol=5e-2
)
def test_wrong_channels(self, utils):
transform, input_videos = utils
with pytest.raises(ValueError):
transform(input_videos[:, :, :, :, :2]) # only two channels
def test_sample_frames(self, utils):
transform, input_videos = utils
out = transform.sample_frames(input_videos)
assert_expected(actual=out.shape, expected=torch.Size([2, 1, 4, 5, 3]))
def test_resize_hw(self, utils):
transform, input_videos = utils
out = transform.resize_hw(input_videos)
assert_expected(actual=out.shape, expected=torch.Size([2, 6, 6, 7, 3]))
def test_normalize(self, utils):
transform, input_videos = utils
out = transform.normalize(input_videos)
assert_expected(actual=out.shape, expected=torch.Size([2, 6, 4, 5, 3]))
assert_expected(
actual=out.mean(), expected=torch.as_tensor(0.0), rtol=0, atol=5e-2
)
assert_expected(
actual=out.std(), expected=torch.as_tensor(1.0), rtol=0, atol=5e-2
)
| EXA-1-master | exa/libraries/multimodal-main/tests/transforms/test_video_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/tests/transforms/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.transforms.bert_text_transform import BertTextTransform
class TestBertTextTransform:
@pytest.fixture(autouse=True)
def set_seed(self):
set_rng_seed(1234)
@pytest.fixture
def utils(self, set_seed):
tokenizer = BertTextTransform()
return tokenizer
def test_single_transform(self, utils):
tokenizer = utils
text = "raw text sample for testing tokenizer"
out = tokenizer(text)
assert_expected(
actual=out,
expected=torch.as_tensor(
[101, 6315, 3793, 7099, 2005, 5604, 19204, 17629, 102]
),
)
def test_multi_transform(self, utils):
tokenizer = utils
text = ["raw text sample for testing tokenizer", "second shorter text"]
out = tokenizer(text)
assert_expected(
actual=out,
expected=torch.as_tensor(
[
[101, 6315, 3793, 7099, 2005, 5604, 19204, 17629, 102],
[101, 2117, 7820, 3793, 102, 0, 0, 0, 0],
]
),
)
| EXA-1-master | exa/libraries/multimodal-main/tests/transforms/test_bert_text_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, get_asset_path, set_rng_seed
from torchmultimodal.transforms.clip_transform import (
CLIPImageTransform,
CLIPTextTransform,
CLIPTransform,
)
from torchvision.transforms import ToPILImage
class TestCLIPTransform:
@pytest.fixture()
def context_length(self):
return 77
@pytest.fixture()
def image1(self):
return ToPILImage()(torch.ones(3, 300, 500))
@pytest.fixture()
def image2(self):
return ToPILImage()(torch.ones(3, 50, 100))
@pytest.fixture()
def text1(self):
return "Taken with my analogue EOS 500N with black & white film."
@pytest.fixture()
def text2(self):
return "This is a shorter sentence."
@pytest.fixture()
def long_text(self, text1):
return (text1 + " ") * 20
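    # Expected BPE ids for text1; 49406 and 49407 are CLIP's start- and end-of-text tokens,
    # which the long-text check below reuses as BOS/EOS.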
@pytest.fixture()
def text1_tokens(self):
return [
49406,
2807,
593,
607,
46031,
17805,
276,
271,
271,
333,
593,
1449,
261,
1579,
1860,
269,
49407,
]
@pytest.fixture()
def bpe_merges_file(self):
return get_asset_path("clip_vocab.bpe")
@pytest.fixture()
def clip_transform(self, bpe_merges_file):
return CLIPTransform(text_bpe_merges_path=bpe_merges_file)
    @pytest.fixture(autouse=True)
    def set_seed(self):
        set_rng_seed(1234)
def test_clip_single_transform(
self,
context_length,
image1,
text1,
text1_tokens,
clip_transform,
):
transformed_image, transformed_text = clip_transform(image=image1, text=text1)
actual_image_size = transformed_image.size()
expected_image_size = torch.Size([3, 224, 224])
assert_expected(actual_image_size, expected_image_size)
actual_text = transformed_text
text1_token_len = len(text1_tokens)
expected_text = torch.tensor(
text1_tokens + [0] * (context_length - text1_token_len),
dtype=torch.long,
)
assert_expected(actual_text, expected_text)
def test_clip_multi_transform(
self,
context_length,
image1,
image2,
text1,
text2,
long_text,
text1_tokens,
clip_transform,
):
images = [image1] * 5 + [image2] * 2
texts = [text1] * 5 + [text2] + [long_text]
transformed_images, transformed_texts = clip_transform(image=images, text=texts)
actual_images_size = transformed_images.size()
expected_images_size = torch.Size([7, 3, 224, 224])
assert_expected(actual_images_size, expected_images_size)
actual_texts_size = transformed_texts.size()
expected_texts_size = torch.Size([7, context_length])
assert_expected(actual_texts_size, expected_texts_size)
# Check encoding of long text
actual_long_text = transformed_texts[-1]
bos_token = text1_tokens[0]
eos_token = text1_tokens[-1]
expected_long_text = torch.tensor(
[bos_token] + (text1_tokens[1:-1] * 20)[: context_length - 2] + [eos_token],
dtype=torch.long,
)
assert_expected(actual_long_text, expected_long_text)
# Check zero padding for short texts
text1_token_len = len(text1_tokens)
actual_zero_pad_val = transformed_texts[:-1, text1_token_len:].max()
expected_zero_pad_val = torch.tensor(0)
assert_expected(actual_zero_pad_val, expected_zero_pad_val)
def test_clip_image_transform_int_resize(self, image1):
image_transform = CLIPImageTransform(is_train=False)
# check the first transform which corresponds to the resize
transformed_image = image_transform.image_transform.transforms[0](image1)
actual_image_size = transformed_image.size
expected_image_size = (373, 224)
assert_expected(actual_image_size, expected_image_size)
def test_clip_image_transform_tuple_resize(self, image1):
image_transform = CLIPImageTransform(image_size=(224, 224), is_train=False)
# check the first transform which corresponds to the resize
transformed_image = image_transform.image_transform.transforms[0](image1)
actual_image_size = transformed_image.size
expected_image_size = (224, 224)
assert_expected(actual_image_size, expected_image_size)
# Only text transforms require torchscripting for now based on user needs
def test_scripting_text_transform(self, text1, bpe_merges_file):
text_transform = CLIPTextTransform(text_bpe_merges_path=bpe_merges_file)
scripted_text_transform = torch.jit.script(text_transform)
assert_expected(text_transform(text1), scripted_text_transform(text1))
| EXA-1-master | exa/libraries/multimodal-main/tests/transforms/test_clip_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.transforms.flava_transform import FLAVAImageTransform
from torchvision import transforms
class TestFLAVAImageTransform:
@pytest.fixture(autouse=True)
def set_seed(self):
set_rng_seed(1234)
def test_image_transform_train(self):
transform = FLAVAImageTransform(
encoder_input_size=3,
codebook_input_size=3,
mask_max_patches=1,
mask_min_patches=1,
mask_num_patches=1,
)
input = transforms.ToPILImage()(torch.ones(2, 2))
out = transform(input)
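        # For an all-ones input the encoder image appears to be normalized with CLIP-style
        # mean/std (the golden values match (1 - 0.4814) / 0.2686 ~= 1.9303 for channel 0),
        # while the codebook image is mapped into [0.1, 0.9], so ones become 0.9.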
expected_image = torch.Tensor(
[
[
[1.9303, 1.9303, 1.9303],
[1.9303, 1.9303, 1.9303],
[1.9303, 1.9303, 1.9303],
],
[
[2.0749, 2.0749, 2.0749],
[2.0749, 2.0749, 2.0749],
[2.0749, 2.0749, 2.0749],
],
[
[2.1459, 2.1459, 2.1459],
[2.1459, 2.1459, 2.1459],
[2.1459, 2.1459, 2.1459],
],
]
)
assert_expected(out["image"], expected_image, atol=1e-4, rtol=1e-4)
assert_expected(out["image_for_codebook"], torch.full((3, 3, 3), 0.9))
assert out["image_patches_mask"].sum() == 1
| EXA-1-master | exa/libraries/multimodal-main/tests/transforms/test_flava_transform.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/tests/modules/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torch import Tensor
from torchmultimodal.modules.losses.albef import (
CausalLanguageModelingLoss,
ImageTextContrastiveLoss,
)
class TestImageTextContrastiveLoss:
@pytest.fixture(autouse=True)
def setup(self):
set_rng_seed(0)
self.loss = ImageTextContrastiveLoss()
def test_itc_loss_invalid_sim(self):
sim_i2t = torch.randn(2, 4) # all inputs should be the same size
sim_t2i = torch.randn(2, 3)
with pytest.raises(RuntimeError):
self.loss(sim_i2t, sim_t2i)
def test_itc_loss_missing_sim_m(self):
# need momentum similarity inputs for ImageTextContrastiveLoss with nonzero alpha
sim_i2t = torch.randn(2, 3)
sim_t2i = torch.randn(2, 3)
with pytest.raises(AssertionError):
self.loss(sim_i2t, sim_t2i, alpha=0.4)
def test_itc_loss_invalid_sim_m(self):
sim_i2t = torch.randn(2, 3)
sim_t2i = torch.randn(2, 3)
sim_i2t_m = torch.randn(2, 4) # all inputs should be the same size
sim_t2i_m = torch.randn(2, 3)
with pytest.raises(RuntimeError):
self.loss(sim_i2t, sim_t2i, sim_i2t_m, sim_t2i_m, alpha=0.4)
def test_itc_loss_invalid_sim_target(self):
sim_i2t = torch.randn(2, 3)
sim_t2i = torch.randn(2, 3)
sim_targets = torch.randn(2, 4) # all inputs should be the same size
with pytest.raises(RuntimeError):
self.loss(sim_i2t, sim_t2i, sim_targets=sim_targets)
def test_itc_loss_without_distillation(self):
sim_i2t = torch.randn(2, 3)
sim_t2i = torch.randn(2, 3)
output = self.loss(sim_i2t, sim_t2i).item()
expected = 1.160506
assert_expected(output, expected, rtol=0, atol=1e-4)
def test_itc_loss_with_distillation(self):
sim_i2t = torch.randn(2, 3)
sim_t2i = torch.randn(2, 3)
sim_i2t_m = torch.randn(2, 3)
sim_t2i_m = torch.randn(2, 3)
output = self.loss(sim_i2t, sim_t2i, sim_i2t_m, sim_t2i_m, alpha=0.4).item()
expected = 1.341230
assert_expected(output, expected, rtol=0, atol=1e-4)
def test_itc_loss_with_sim_targets(self):
sim_i2t = torch.randn(2, 3)
sim_t2i = torch.randn(2, 3)
sim_i2t_m = torch.randn(2, 3)
sim_t2i_m = torch.randn(2, 3)
sim_targets = torch.randn(2, 3)
output = self.loss(
sim_i2t, sim_t2i, sim_i2t_m, sim_t2i_m, sim_targets, alpha=0.4
).item()
expected = -0.512445
assert_expected(output, expected, rtol=0, atol=1e-4)
class TestCausalLanguageModelingLoss:
@pytest.fixture(autouse=True)
def setup(self):
set_rng_seed(0)
self.loss = CausalLanguageModelingLoss()
def test_mlm_loss_invalid_labels(self):
# labels dimensions should match the first two dimensions of prediction_scores
labels = torch.randint(10, (2, 6))
prediction_scores = torch.randn(2, 5, 20)
with pytest.raises(ValueError):
self.loss(labels, prediction_scores)
def test_mlm_loss_missing_momentum_embeddings(self):
# need prediction_scores_m input for CausalLanguageModelingLoss with nonzero alpha
labels = torch.randint(10, (2, 5))
prediction_scores = torch.randn(2, 5, 20)
alpha = 0.4
with pytest.raises(AssertionError):
self.loss(labels, prediction_scores, alpha=alpha)
def test_mlm_loss(self):
labels = torch.randint(10, (2, 5))
prediction_scores = torch.randn(2, 5, 20)
output = self.loss(labels, prediction_scores)
expected = Tensor([14.552961, 14.930183])
assert_expected(output, expected, rtol=0, atol=1e-4)
def test_mlm_loss_with_distillation(self):
labels = torch.randint(10, (2, 5))
prediction_scores = torch.randn(2, 5, 20)
prediction_scores_m = torch.randn(2, 5, 20)
alpha = 0.4
output = self.loss(labels, prediction_scores, prediction_scores_m, alpha)
expected = Tensor([14.367424, 14.541029])
assert_expected(output, expected, rtol=0, atol=1e-4)
| EXA-1-master | exa/libraries/multimodal-main/tests/modules/losses/test_albef.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/libraries/multimodal-main/tests/modules/losses/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.modules.losses.mdetr import box_losses, soft_token_prediction_loss
from torchvision.ops.boxes import box_convert
@pytest.fixture(autouse=True)
def rng():
set_rng_seed(1)
class TestMDETRLosses:
@pytest.fixture()
def batch_size(self):
return 2
@pytest.fixture()
def num_queries(self):
return 10
@pytest.fixture()
def num_classes(self):
return 15
@pytest.fixture()
def pred_logits(self, batch_size, num_queries, num_classes):
return torch.randn(batch_size, num_queries, num_classes + 1)
@pytest.fixture()
def construct_valid_boxes(self):
def _construct_valid_boxes(n_boxes):
boxes = []
for _ in range(n_boxes):
x1, y1 = torch.rand(2).unbind(-1)
x2 = random.uniform(x1.item(), 1)
y2 = random.uniform(y1.item(), 1)
box = box_convert(
torch.Tensor([x1, y1, x2, y2]), in_fmt="xyxy", out_fmt="cxcywh"
)
boxes.append(box)
return torch.stack(boxes)
return _construct_valid_boxes
@pytest.fixture()
def pred_boxes(self, construct_valid_boxes, batch_size, num_queries):
return construct_valid_boxes(batch_size * num_queries).reshape(
batch_size, num_queries, -1
)
def test_soft_token_prediction_loss(self, pred_logits):
indices = [
(torch.LongTensor([4, 5, 9]), torch.LongTensor([1, 0, 2])),
(
torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
torch.LongTensor([9, 8, 3, 4, 5, 6, 7, 1, 0, 2]),
),
]
n_boxes_per_sample = [3, 10]
num_boxes = 19
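        # positive_map is (total target boxes x token positions) = 13 x 16; row i marks which
        # text token positions describe ground-truth box i (3 boxes for the first sample,
        # 10 for the second).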
        positive_map = torch.Tensor(
            [
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            ]
        )
actual = torch.Tensor(
soft_token_prediction_loss(
pred_logits, n_boxes_per_sample, positive_map, indices, num_boxes
)
)
expected = torch.tensor(5.2867)
assert_expected(actual, expected, rtol=0, atol=1e-3)
def test_box_losses(self, pred_boxes):
indices = [
(torch.LongTensor([4, 6, 7, 8, 9]), torch.LongTensor([3, 0, 4, 2, 1])),
(torch.LongTensor([1, 8]), torch.LongTensor([1, 0])),
]
num_boxes = 8
target_boxes = [
torch.Tensor(
[
[0.9941, 0.6071, 0.0070, 0.6372],
[0.9358, 0.6296, 0.1217, 0.2474],
[0.6058, 0.8187, 0.7384, 0.1234],
[0.5829, 0.6806, 0.6967, 0.0670],
[0.4472, 0.7152, 0.1831, 0.5401],
]
),
torch.Tensor(
[[0.2642, 0.6090, 0.4897, 0.6948], [0.8163, 0.6436, 0.0900, 0.5304]]
),
]
actual = box_losses(pred_boxes, target_boxes, indices, num_boxes)
expected_l1_loss = torch.tensor(0.8463)
expected_giou_loss = torch.tensor(1.2569)
assert_expected(actual.l1_loss, expected_l1_loss, rtol=0, atol=1e-3)
assert_expected(actual.giou_loss, expected_giou_loss, rtol=0, atol=1e-3)
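# For reference, a minimal self-contained sketch of the two box terms checked
# above, assuming the standard DETR/MDETR formulation: an L1 loss on matched
# (cx, cy, w, h) box pairs plus a generalized-IoU loss on their xyxy-converted
# counterparts, both normalized by num_boxes. Illustrative only; the exact
# reduction inside torchmultimodal's box_losses may differ.
import torch.nn.functional as F
from torchvision.ops import generalized_box_iou
def box_losses_sketch(src_boxes, tgt_boxes, num_boxes):
    # src_boxes, tgt_boxes: matched pairs in cxcywh format, shape (n, 4)
    l1_loss = F.l1_loss(src_boxes, tgt_boxes, reduction="none").sum() / num_boxes
    giou = generalized_box_iou(
        box_convert(src_boxes, in_fmt="cxcywh", out_fmt="xyxy"),
        box_convert(tgt_boxes, in_fmt="cxcywh", out_fmt="xyxy"),
    )
    giou_loss = (1 - torch.diag(giou)).sum() / num_boxes
    return l1_loss, giou_loss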
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/losses/test_mdetr_losses.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from tests.test_utils import assert_expected
from torchmultimodal.modules.losses.vqvae import CommitmentLoss
class TestCommitment(unittest.TestCase):
"""
Test the Commitment Loss
"""
def setUp(self):
self.quantized = torch.Tensor([[-1, 0, 1], [2, 1, 0]])
self.encoded = torch.Tensor([[-2, -1, 0], [0, 2, -2]])
self.commitment = CommitmentLoss()
def test_loss_value(self):
loss = self.commitment(self.quantized, self.encoded)
actual = loss.item()
expected = 2.0
assert_expected(actual, expected)
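# The expected value of 2.0 above is exactly what a plain mean-squared error
# between the (stop-gradient) quantized vectors and the encoder output gives for
# these inputs, which is the usual VQ-VAE commitment term. A quick check of that
# arithmetic, as an illustrative sketch:
import torch.nn.functional as F
_quantized = torch.Tensor([[-1, 0, 1], [2, 1, 0]])
_encoded = torch.Tensor([[-2, -1, 0], [0, 2, -2]])
# squared differences are [1, 1, 1] and [4, 1, 4], so the mean is 12 / 6 = 2.0
assert F.mse_loss(_quantized.detach(), _encoded).item() == 2.0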
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/losses/test_commitment.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from itertools import chain
from typing import List
import pytest
import torch
import torch.multiprocessing as mp
import torch.nn as nn
import torch.optim as optim
from tests.test_utils import (
assert_expected,
gpu_test,
init_distributed_on_file,
set_rng_seed,
with_temp_files,
)
from torch import distributed as dist, Tensor
from torchmultimodal.modules.losses.contrastive_loss_with_temperature import (
ContrastiveLossWithTemperature,
)
from torchmultimodal.utils.common import get_current_device
class TestContrastiveLossWithTemperature:
"""
Test the contrastive loss with temperature param
"""
@pytest.fixture(autouse=True)
def set_seed(self):
set_rng_seed(0)
torch.backends.cudnn.deterministic = True
@pytest.fixture()
def text_dim(self):
return 5
@pytest.fixture()
def image_dim(self):
return 8
@pytest.fixture()
def embedding_dim(self):
return 3
@pytest.fixture()
def global_batch_size(self):
return 4
@pytest.fixture()
def text_tensor(self, global_batch_size, text_dim):
return torch.randn(global_batch_size, text_dim)
@pytest.fixture()
def image_tensor(self, global_batch_size, image_dim):
return torch.randn(global_batch_size, image_dim)
@pytest.fixture()
def text_encoder(self, text_dim, embedding_dim):
return nn.Linear(text_dim, embedding_dim)
@pytest.fixture()
def image_encoder(self, image_dim, embedding_dim):
return nn.Linear(image_dim, embedding_dim)
def test_local_loss(self):
torch.manual_seed(1234)
clip_loss = ContrastiveLossWithTemperature()
clip_loss = clip_loss.to(get_current_device())
embeddings_a = torch.randn(3, 5)
embeddings_b = torch.randn(3, 5)
loss = clip_loss(embeddings_a=embeddings_a, embeddings_b=embeddings_b)
assert_expected(loss.item(), 9.8753, rtol=0, atol=1e-3)
def test_temperature_clamp_max(self):
torch.manual_seed(1234)
clip_loss_at_max = ContrastiveLossWithTemperature(
logit_scale=2, logit_scale_max=2
).to(get_current_device())
clip_loss_above_max = ContrastiveLossWithTemperature(
logit_scale=3, logit_scale_max=2
).to(get_current_device())
embeddings_a = torch.randn(3, 5)
embeddings_b = torch.randn(3, 5)
loss_at_max = clip_loss_at_max(embeddings_a, embeddings_b).item()
loss_above_max = clip_loss_above_max(embeddings_a, embeddings_b).item()
assert_expected(loss_above_max, loss_at_max, rtol=0, atol=1e-3)
def test_temperature_clamp_min(self):
torch.manual_seed(1234)
clip_loss_at_min = ContrastiveLossWithTemperature(
logit_scale=2, logit_scale_min=2
).to(get_current_device())
clip_loss_below_min = ContrastiveLossWithTemperature(
logit_scale=1, logit_scale_min=2
).to(get_current_device())
embeddings_a = torch.randn(3, 5)
embeddings_b = torch.randn(3, 5)
loss_at_min = clip_loss_at_min(embeddings_a, embeddings_b).item()
loss_below_min = clip_loss_below_min(embeddings_a, embeddings_b).item()
assert_expected(loss_below_min, loss_at_min, rtol=0, atol=1e-3)
def test_loss_with_ce_kwargs(self):
torch.manual_seed(1234)
clip_loss = ContrastiveLossWithTemperature()
clip_loss = clip_loss.to(get_current_device())
embeddings_a = torch.randn(3, 5)
embeddings_b = torch.randn(3, 5)
loss = clip_loss(
embeddings_a=embeddings_a,
embeddings_b=embeddings_b,
cross_entropy_kwargs={"label_smoothing": 0.1},
)
assert_expected(loss.item(), 10.2524, rtol=0, atol=1e-3)
def test_temperature_clamp_invalid(self):
with pytest.raises(ValueError):
ContrastiveLossWithTemperature(logit_scale_max=None, logit_scale_min=None)
@staticmethod
def _model_worker(
gpu_id: int,
sync_file: str,
world_size: int,
global_batch_size: int,
all_images: Tensor,
all_texts: Tensor,
image_encoder: nn.Module,
text_encoder: nn.Module,
):
init_distributed_on_file(
world_size=world_size, gpu_id=gpu_id, sync_file=sync_file
)
assert global_batch_size % world_size == 0
local_batch_size = global_batch_size // world_size
# Split images and text across GPUs
local_images = torch.split(all_images, local_batch_size)[gpu_id].cuda(gpu_id)
local_texts = torch.split(all_texts, local_batch_size)[gpu_id].cuda(gpu_id)
image_encoder = image_encoder.cuda(gpu_id)
text_encoder = text_encoder.cuda(gpu_id)
loss_fn = ContrastiveLossWithTemperature()
loss_fn = loss_fn.cuda(gpu_id)
all_params = chain(
image_encoder.parameters(), text_encoder.parameters(), loss_fn.parameters()
)
optimizer = optim.SGD(all_params, lr=1e-4)
# Forward pass
local_image_embeddings = image_encoder(local_images)
local_text_embeddings = text_encoder(local_texts)
loss = loss_fn(
local_image_embeddings, local_text_embeddings, backprop_in_gather=True
)
# Compute gradients
optimizer.zero_grad()
loss.backward()
# Gather gradients from all devices
def gather_grads(x: torch.Tensor) -> List[torch.Tensor]:
grads = [torch.zeros_like(x).cuda(gpu_id) for i in range(world_size)]
dist.all_gather(grads, x)
grad = torch.stack(grads).mean()
return grad
# Gather losses from all devices
gathered_loss = gather_grads(torch.Tensor([loss]).cuda(gpu_id))
assert_expected(gathered_loss.item(), 3.8848, rtol=0, atol=1e-3)
# Gradients for image encoder weights
img_encoder_weight_grad = gather_grads(image_encoder.weight.grad)
assert_expected(
img_encoder_weight_grad.mean().item(), 0.0979, rtol=0, atol=1e-3
)
# Gradients for text encoder bias
text_encoder_bias_grad = gather_grads(text_encoder.bias.grad)
assert_expected(
text_encoder_bias_grad.mean().item(), -1.8151, rtol=0, atol=1e-3
)
# Logit scale gradient
logit_scale_grad = gather_grads(loss_fn.logit_scale.grad)
assert_expected(logit_scale_grad.mean().item(), 3.6792, rtol=0, atol=1e-3)
@gpu_test(gpu_count=1)
def test_single_gpu_loss(
self, global_batch_size, image_tensor, text_tensor, image_encoder, text_encoder
):
with with_temp_files(count=1) as sync_file:
world_size = 1
mp.spawn(
TestContrastiveLossWithTemperature._model_worker,
(
sync_file,
world_size,
global_batch_size,
image_tensor,
text_tensor,
image_encoder,
text_encoder,
),
nprocs=world_size,
)
@gpu_test(gpu_count=2)
def test_multi_gpu_loss(
self, global_batch_size, image_tensor, text_tensor, image_encoder, text_encoder
):
with with_temp_files(count=1) as sync_file:
world_size = 2
mp.spawn(
TestContrastiveLossWithTemperature._model_worker,
(
sync_file,
world_size,
global_batch_size,
image_tensor,
text_tensor,
image_encoder,
text_encoder,
),
nprocs=world_size,
)
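# For reference, a minimal single-process sketch of a CLIP-style contrastive loss
# with temperature: normalize both embeddings, scale the similarity logits by
# exp(logit_scale), and average the a->b and b->a cross entropies against the
# diagonal targets. This mirrors the standard CLIP formulation; the module under
# test may differ in details such as logit-scale clamping and distributed gather.
import torch.nn.functional as F
def clip_style_contrastive_loss(embeddings_a, embeddings_b, logit_scale):
    # logit_scale: scalar tensor holding the log of the inverse temperature
    a = F.normalize(embeddings_a, dim=-1)
    b = F.normalize(embeddings_b, dim=-1)
    logits = logit_scale.exp() * a @ b.t()
    targets = torch.arange(a.size(0), device=a.device)
    return 0.5 * (F.cross_entropy(logits, targets) + F.cross_entropy(logits.t(), targets))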
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/losses/test_contrastive_loss_with_temperature.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/fusions/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torch import nn
from torchmultimodal.modules.fusions.deepset_fusion import (
deepset_transformer,
DeepsetFusionModule,
DeepsetFusionWithTransformer,
)
from torchmultimodal.modules.layers.mlp import MLP
class TestDeepSetFusionModule(unittest.TestCase):
def setUp(self):
self.channel_to_encoder_dim = {
"channel_1": 3,
"channel_2": 3,
"channel_3": 3,
}
self.batch_size = 2
self.input = {}
self.input_bsz_1 = {}
for channel, dim in self.channel_to_encoder_dim.items():
self.input[channel] = torch.rand((self.batch_size, dim))
self.input_bsz_1[channel] = torch.rand((1, dim))
self.transformer = nn.TransformerEncoder(
encoder_layer=nn.TransformerEncoderLayer(3, 3, batch_first=True),
num_layers=1,
norm=nn.LayerNorm(3),
)
self.mlp = MLP(in_dim=3, out_dim=4)
def _do_assertions(self, fusion):
fused = fusion(self.input)
self.assertEqual(fused.size(), (self.batch_size, 4))
fused_bsz_1 = fusion(self.input_bsz_1)
self.assertEqual(fused_bsz_1.size(), (1, 4))
def test_deepset_sum(self):
fusion = DeepsetFusionModule(self.channel_to_encoder_dim, self.mlp, torch.sum)
self._do_assertions(fusion)
def test_deepset_mean(self):
fusion = DeepsetFusionModule(self.channel_to_encoder_dim, self.mlp, torch.mean)
self._do_assertions(fusion)
def test_deepset_median(self):
fusion = DeepsetFusionModule(
self.channel_to_encoder_dim, self.mlp, torch.median
)
self._do_assertions(fusion)
def test_deepset_min(self):
fusion = DeepsetFusionModule(self.channel_to_encoder_dim, self.mlp, torch.min)
self._do_assertions(fusion)
def test_deepset_max(self):
fusion = DeepsetFusionModule(self.channel_to_encoder_dim, self.mlp, torch.max)
self._do_assertions(fusion)
def test_deepset_invalid_pooling(self):
def random(x, dim):
return "random"
fusion = DeepsetFusionModule(self.channel_to_encoder_dim, self.mlp, random)
with self.assertRaises(ValueError):
fusion(self.input)
def test_deepset_auto_mapping(self):
fusion = DeepsetFusionModule(
self.channel_to_encoder_dim,
self.mlp,
torch.sum,
modality_normalize=True,
use_auto_mapping=True,
)
self._do_assertions(fusion)
def test_deepset_modality_normalize(self):
fusion = DeepsetFusionModule(
self.channel_to_encoder_dim,
self.mlp,
torch.sum,
modality_normalize=True,
)
self._do_assertions(fusion)
def test_deepset_apply_attention(self):
fusion = DeepsetFusionModule(
self.channel_to_encoder_dim,
self.mlp,
torch.sum,
modality_normalize=True,
apply_attention=True,
)
self._do_assertions(fusion)
def test_deepset_transformer(self):
fusion = DeepsetFusionWithTransformer(
self.channel_to_encoder_dim,
self.mlp,
self.transformer,
)
self._do_assertions(fusion)
def test_torchscript(self):
fusion = DeepsetFusionWithTransformer(
self.channel_to_encoder_dim,
self.mlp,
self.transformer,
)
torch.jit.script(fusion)
fusion = DeepsetFusionModule(
self.channel_to_encoder_dim,
self.mlp,
torch.sum,
)
torch.jit.script(fusion)
def test_get_deepset_transformer(self):
fusion = deepset_transformer(
self.channel_to_encoder_dim,
self.mlp,
num_transformer_att_heads=3,
)
self.assertTrue(isinstance(fusion, DeepsetFusionModule))
self.assertTrue(isinstance(fusion.pooling_function, nn.TransformerEncoder))
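# Conceptually, the deepset-style fusion tested above pools the per-modality
# embeddings with a permutation-invariant function and feeds the pooled vector to
# an MLP. A minimal functional sketch of that idea (the real module also handles
# projections, normalization and attention, which are omitted here):
def deepset_fusion_sketch(embeddings, mlp, pooling_function=torch.sum):
    # embeddings: {channel_name: tensor of shape (batch, dim)}, all dims equal
    stacked = torch.stack(list(embeddings.values()), dim=1)  # (batch, n_channels, dim)
    pooled = pooling_function(stacked, dim=1)
    if isinstance(pooled, tuple):  # torch.max/min/median return (values, indices)
        pooled = pooled[0]
    return mlp(pooled)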
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/fusions/test_deepset_fusion.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torchmultimodal.modules.fusions.attention_fusion import AttentionFusionModule
class TestAttentionFusionModule(unittest.TestCase):
def setUp(self):
self.channel_to_encoder_dim = {
"channel_1": 3,
"channel_2": 3,
"channel_3": 4,
}
self.batch_size = 2
self.input = {}
for channel, dim in self.channel_to_encoder_dim.items():
self.input[channel] = torch.rand((self.batch_size, dim))
def test_no_projection_dim(self):
fusion = AttentionFusionModule(self.channel_to_encoder_dim)
fused = fusion(self.input)
self.assertEqual(fused.size(), (self.batch_size, 3))
def test_input_projection_dim(self):
fusion = AttentionFusionModule(
self.channel_to_encoder_dim, encoding_projection_dim=2
)
fused = fusion(self.input)
self.assertEqual(fused.size(), (self.batch_size, 2))
def test_scripted_model(self):
fusion = AttentionFusionModule(
self.channel_to_encoder_dim, encoding_projection_dim=2
)
scripted_model = torch.jit.script(fusion)
fused = scripted_model(self.input)
self.assertEqual(fused.size(), (self.batch_size, 2))
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/fusions/test_attention_fusion.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from functools import partial
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torch import nn
from torchmultimodal.modules.layers.mlp import MLP
class TestMLP(unittest.TestCase):
"""
Test the MLP class
"""
def setUp(self):
torch.set_printoptions(precision=10)
set_rng_seed(0)
self.in_dim = 5
self.out_dim = 3
self.input = torch.randn((4, 5))
self.hidden_dims = [2, 6]
def test_no_hidden_layers(self):
mlp = MLP(in_dim=self.in_dim, out_dim=self.out_dim)
actual = mlp(self.input)
expected = torch.Tensor(
[
[0.165539, 0.455205, -0.331436],
[1.186858, -0.380429, -0.888067],
[0.813341, -1.444306, 0.507025],
[1.710142, -0.744562, -0.199996],
],
)
assert_expected(actual, expected)
def test_pass_hidden_dims(self):
mlp = MLP(
in_dim=self.in_dim, out_dim=self.out_dim, hidden_dims=self.hidden_dims
)
actual = mlp(self.input)
expected = torch.Tensor(
[
[-0.104062, 0.289350, 0.052587],
[-0.114036, 0.186682, 0.028555],
[0.243891, 0.085128, 0.087790],
[0.395047, 1.070629, -0.927500],
],
)
assert_expected(actual, expected)
def test_activation_and_normalization(self):
activation = torch.nn.LeakyReLU
normalization = partial(torch.nn.BatchNorm1d, eps=0.1)
mlp = MLP(
in_dim=self.in_dim,
out_dim=self.out_dim,
hidden_dims=self.hidden_dims,
activation=activation,
normalization=normalization,
)
actual = mlp(self.input)
expected = torch.Tensor(
[
[0.089560, 0.057747, -0.035710],
[-0.069851, -0.418727, -0.457506],
[-0.072189, -0.415917, -0.464918],
[0.348458, 0.898804, -0.778149],
]
)
assert_expected(actual, expected)
def test_dropout_default(self):
mlp = MLP(
in_dim=self.in_dim,
out_dim=self.out_dim,
hidden_dims=self.hidden_dims,
)
assert any(isinstance(layer, nn.Dropout) for layer in mlp.model.children())
def test_no_dropout(self):
mlp = MLP(
in_dim=self.in_dim,
out_dim=self.out_dim,
hidden_dims=self.hidden_dims,
dropout=0.0,
)
        assert not any(isinstance(layer, nn.Dropout) for layer in mlp.model.children())
def test_torchscript(self):
mlp = MLP(in_dim=self.in_dim, out_dim=self.out_dim)
torch.jit.script(mlp)
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/layers/test_mlp.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected
from torch import nn
from torchmultimodal.modules.layers.multi_head_attention import MultiHeadSelfAttention
class TestMultiHeadSelfAttention:
@pytest.fixture
def embed_dim(self):
return 4
@pytest.fixture
def multi_head_self_attn(self, embed_dim):
mhsa = MultiHeadSelfAttention(embed_dim, num_heads=2)
mhsa.input_proj.weight = nn.Parameter(torch.ones(3 * embed_dim, embed_dim))
mhsa.input_proj.bias = nn.Parameter(torch.ones(3 * embed_dim))
mhsa.output_proj.weight = nn.Parameter(torch.ones(embed_dim, embed_dim))
mhsa.output_proj.bias = nn.Parameter(torch.ones(embed_dim))
mhsa.eval()
return mhsa
def test_multi_head_self_attention(
self,
embed_dim,
multi_head_self_attn,
):
q = torch.Tensor([[[1, 2, 3, 1], [4, 3, 2, 1], [1, 1, 1, 1]]])
actual = multi_head_self_attn(q)
expected = torch.tensor(
[
[
[45.0, 45.0, 45.0, 45.0],
[45.0, 45.0, 45.0, 45.0],
[45.0, 45.0, 45.0, 45.0],
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_scripting(
self,
embed_dim,
multi_head_self_attn,
):
q = torch.Tensor([[[1, 2, 3, 1], [4, 3, 2, 1], [1, 1, 1, 1]]])
scripted_model = torch.jit.script(multi_head_self_attn)
assert_expected(scripted_model(q), multi_head_self_attn(q), rtol=0, atol=1e-4)
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/layers/test_multi_head_attention.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from tests.test_utils import assert_expected
from torchmultimodal.modules.layers.activation import SiLU
def test_sigmoid_linear_unit():
silu = SiLU()
actual = silu(torch.ones(3))
expected = torch.tensor([0.8458, 0.8458, 0.8458])
assert_expected(actual, expected)
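# The expected value 0.8458 above corresponds to the gated form x * sigmoid(1.702 * x)
# (the sigmoid approximation of GELU) rather than plain x * sigmoid(x), which would
# give roughly 0.7311 for x = 1. A quick check of that arithmetic, as a sketch:
_x = torch.ones(3)
assert torch.allclose(_x * torch.sigmoid(1.702 * _x), torch.tensor([0.8458, 0.8458, 0.8458]), atol=1e-4)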
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/layers/test_activation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torchmultimodal.modules.layers.normalizations import Fp32GroupNorm, Fp32LayerNorm
def test_fp32layernorm():
x = torch.ones(1, 1, dtype=torch.float16)
norm = Fp32LayerNorm(1)
output = norm(x)
assert output.dtype == torch.float16
def test_fp32groupnorm():
x = torch.ones(2, 4, dtype=torch.float16)
norm = Fp32GroupNorm(2, 4)
output = norm(x)
assert output.dtype == torch.float16
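# The dtype assertions above are consistent with the common mixed-precision
# pattern of running the normalization in float32 and casting the result back to
# the input dtype. A minimal sketch of that pattern for layer norm, assuming no
# affine parameters for brevity:
import torch.nn.functional as F
def fp32_layer_norm_sketch(x, normalized_shape):
    return F.layer_norm(x.float(), normalized_shape).type_as(x)
assert fp32_layer_norm_sketch(torch.ones(1, 1, dtype=torch.float16), (1,)).dtype == torch.float16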
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/layers/test_normalizations.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/layers/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected
from torch import nn
from torchmultimodal.modules.layers.position_embedding import (
BroadcastedPositionEmbedding,
)
class TestBroadcastedPositionEmbedding:
@pytest.fixture(scope="class")
def pos_emb(self):
_pos_emb = BroadcastedPositionEmbedding(
latent_shape=(1, 2, 3),
embedding_dim=6,
)
_pos_emb.embedding = nn.ParameterDict(
{
"d_0": nn.Parameter(torch.tensor([[0.0, 1.0]])),
"d_1": nn.Parameter(torch.tensor([[2.0, 3.0], [4.0, 5.0]])),
"d_2": nn.Parameter(torch.tensor([[6.0, 7.0], [8.0, 9.0], [0.0, 1.0]])),
}
)
return _pos_emb
def test_init_sets_embedding(self, pos_emb):
"""Test the embeddings are initialized with the correct dimensions"""
expected = [(1, 2), (2, 2), (3, 2)]
for i, (key, _) in enumerate(pos_emb.embedding.items()):
assert_expected(pos_emb.embedding[key].shape, expected[i])
def test_init_bad_embedding_dim(self):
"""Test raising error when the embedding dim is not allowed"""
with pytest.raises(ValueError):
BroadcastedPositionEmbedding(latent_shape=(1, 2, 3), embedding_dim=5)
def test_broadcast(self, pos_emb):
"""Test embedding along each dim is broadcasted correctly"""
expected = [
torch.tensor(
[
[
[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],
],
]
),
torch.tensor(
[
[
[[2.0, 3.0], [2.0, 3.0], [2.0, 3.0]],
[[4.0, 5.0], [4.0, 5.0], [4.0, 5.0]],
],
]
),
torch.tensor(
[
[
[[6.0, 7.0], [8.0, 9.0], [0.0, 1.0]],
[[6.0, 7.0], [8.0, 9.0], [0.0, 1.0]],
],
]
),
]
for i in range(pos_emb.n_dim):
assert_expected(pos_emb._broadcast(i), expected[i])
def test_forward(self, pos_emb):
"""Test the correct embeddings are returned for the given position ids"""
position_ids = torch.tensor([[1, 3, -1]])
actual = pos_emb(position_ids)
expected = torch.tensor(
[
[
[0.0, 1.0, 2.0, 3.0, 8.0, 9.0],
[0.0, 1.0, 4.0, 5.0, 6.0, 7.0],
[0.0, 1.0, 4.0, 5.0, 0.0, 1.0],
]
]
)
assert_expected(actual, expected)
def test_forward_invalid_input(self, pos_emb):
"""Test raising error when position ids contain illegal values"""
with pytest.raises(IndexError) as exc_info:
pos_emb(position_ids=torch.tensor([[-2, 0]]))
assert exc_info.value.args[0] == "Invalid position ids: tensor([-2])"
with pytest.raises(IndexError) as exc_info:
pos_emb(position_ids=torch.tensor([[0, 6]]))
assert exc_info.value.args[0] == "Invalid position ids: tensor([6])"
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/layers/test_position_embedding.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from collections import OrderedDict
import pytest
import torch
from tests.test_utils import assert_expected, assert_expected_namedtuple, set_rng_seed
from torch import nn, tensor
from torchmultimodal.modules.layers.codebook import Codebook
@pytest.fixture(autouse=True)
def random_seed():
set_rng_seed(4)
@pytest.fixture
def num_embeddings():
return 4
@pytest.fixture
def embedding_dim():
return 5
@pytest.fixture
def encoded():
# This is 2x5x3
encoded = tensor(
[
[
[-1.0, 0.0, 1.0],
[2.0, 1.0, 0.0],
[0.0, -1.0, -1.0],
[0.0, 2.0, -1.0],
[-2.0, -1.0, 1.0],
],
[
[2.0, 2.0, -1.0],
[1.0, -1.0, -2.0],
[0.0, 0.0, 0.0],
[1.0, 2.0, 1.0],
[1.0, 0.0, 0.0],
],
]
)
encoded.requires_grad_()
return encoded
@pytest.fixture
def embedding_weights():
# This is 4x5
return tensor(
[
[1.0, 0.0, -1.0, -1.0, 2.0],
[2.0, -2.0, 0.0, 0.0, 1.0],
[2.0, 1.0, 0.0, 1.0, 1.0],
[-1.0, -2.0, 0.0, 2.0, 0.0],
]
)
@pytest.fixture
def input_tensor_flat():
# This is 4x3
return tensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
@pytest.fixture
def codebook(num_embeddings, embedding_dim):
return Codebook(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim,
decay=0.3,
)
class TestCodebook:
def test_quantized_output(self, codebook, embedding_weights, encoded):
codebook.embedding = embedding_weights
codebook._is_embedding_init = True
actual = codebook(encoded)
# This is shape (2,5,3)
expected_quantized = tensor(
[
[
[2.0, 2.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 0.0, -1.0],
[1.0, 1.0, -1.0],
[1.0, 1.0, 2.0],
],
[
[2.0, 2.0, -1.0],
[1.0, -2.0, -2.0],
[0.0, 0.0, 0.0],
[1.0, 0.0, 2.0],
[1.0, 1.0, 0.0],
],
]
)
expected_quantized_flat = (
expected_quantized.permute(0, 2, 1).contiguous().view(-1, 5)
)
expected = {
"encoded_flat": encoded.permute(0, 2, 1).contiguous().view(-1, 5),
"quantized_flat": expected_quantized_flat,
"codebook_indices": tensor([[2.0, 2.0, 0.0], [2.0, 1.0, 3.0]]).type(
torch.LongTensor
),
"quantized": expected_quantized,
}
assert_expected_namedtuple(actual, expected)
def test_preprocess(self, codebook, encoded):
encoded_flat, permuted_shape = codebook._preprocess(encoded)
expected_flat_shape = torch.tensor([6, 5])
expected_permuted_shape = torch.tensor([2, 3, 5])
actual_flat_shape = torch.tensor(encoded_flat.shape)
actual_permuted_shape = torch.tensor(permuted_shape)
assert_expected(actual_flat_shape, expected_flat_shape)
assert_expected(actual_permuted_shape, expected_permuted_shape)
def test_preprocess_channel_dim_assertion(self, codebook, encoded):
with pytest.raises(ValueError):
codebook._preprocess(encoded[:, :4, :])
def test_postprocess(self, codebook, input_tensor_flat):
quantized = codebook._postprocess(input_tensor_flat, torch.Size([2, 2, 3]))
actual_quantized_shape = torch.tensor(quantized.shape)
expected_quantized_shape = torch.tensor([2, 3, 2])
assert_expected(actual_quantized_shape, expected_quantized_shape)
def test_init_embedding(self, codebook, encoded, num_embeddings):
assert (
not codebook._is_embedding_init
), "embedding init flag not False initially"
encoded_flat, _ = codebook._preprocess(encoded)
codebook._init_embedding(encoded_flat)
assert codebook._is_embedding_init, "embedding init flag not True after init"
actual_weight = codebook.embedding
expected_weight = tensor(
[
[2.0, -1.0, 0.0, 2.0, 0.0],
[2.0, 1.0, 0.0, 1.0, 1.0],
[0.0, 1.0, -1.0, 2.0, -1.0],
[1.0, 0.0, -1.0, -1.0, 1.0],
]
)
assert_expected(actual_weight, expected_weight)
actual_code_avg = codebook.code_avg
expected_code_avg = actual_weight
assert_expected(actual_code_avg, expected_code_avg)
actual_code_usage = codebook.code_usage
expected_code_usage = torch.ones(num_embeddings)
assert_expected(actual_code_usage, expected_code_usage)
def test_ema_update_embedding(self, codebook, encoded):
encoded_flat, _ = codebook._preprocess(encoded)
codebook._init_embedding(encoded_flat)
distances = torch.cdist(encoded_flat, codebook.embedding, p=2.0) ** 2
codebook_indices = torch.argmin(distances, dim=1)
codebook._ema_update_embedding(encoded_flat, codebook_indices)
actual_weight = codebook.embedding
expected_weight = tensor(
[
[0.7647, -1.4118, 0.0000, 1.5882, 0.0000],
[2.0000, 1.0000, 0.0000, 1.0000, 1.0000],
[-0.4118, 1.4118, -0.5882, 1.1765, -1.4118],
[1.0000, 0.0000, -1.0000, -1.0000, 1.0000],
]
)
assert_expected(actual_weight, expected_weight, rtol=0.0, atol=1e-4)
actual_code_avg = codebook.code_avg
expected_code_avg = tensor(
[
[1.3000, -2.4000, 0.0000, 2.7000, 0.0000],
[2.0000, 1.0000, 0.0000, 1.0000, 1.0000],
[-0.7000, 2.4000, -1.0000, 2.0000, -2.4000],
[1.0000, 0.0000, -1.0000, -1.0000, 1.0000],
]
)
assert_expected(actual_code_avg, expected_code_avg, rtol=0.0, atol=1e-4)
actual_code_usage = codebook.code_usage
expected_code_usage = tensor([1.7000, 1.0000, 1.7000, 1.0000])
assert_expected(actual_code_usage, expected_code_usage, rtol=0.0, atol=1e-4)
def test_register_buffer_tensors(self, codebook, encoded):
out = codebook(encoded)
out.quantized.sum().backward()
msg_has_grad = "tensor assigned to buffer but accumulated grad"
with warnings.catch_warnings():
warnings.simplefilter("ignore")
assert not codebook.code_avg.grad, msg_has_grad
assert not codebook.code_usage.grad, msg_has_grad
assert not codebook.embedding.grad, msg_has_grad
assert not list(
codebook.parameters()
), "buffer variables incorrectly assigned as params"
def test_init_embedding_smaller_encoded(self, codebook, encoded):
encoded_small = encoded[:1, :, :2]
encoded_small_flat, _ = codebook._preprocess(encoded_small)
codebook._init_embedding(encoded_small_flat)
embed = codebook.embedding
        # Check that each embedding vector matches one of the encoded vectors (up to the added noise)
for emb in embed:
assert any(
[
torch.isclose(emb, enc, rtol=0, atol=0.01).all()
for enc in encoded_small_flat
]
), "embedding initialized from encoder output incorrectly"
def test_codebook_restart(self, codebook, encoded):
encoded_flat, _ = codebook._preprocess(encoded)
# First init and diversify embedding
codebook._init_embedding(encoded_flat)
# Use only embedding vector at index = 1 and force restarts.
# Slightly modify encoded_flat to make sure vectors restart to something new
encoded_flat_noise = encoded_flat + torch.randn_like(encoded_flat)
codebook_indices_low_usage = torch.ones(encoded_flat.shape[0], dtype=torch.long)
codebook._ema_update_embedding(encoded_flat_noise, codebook_indices_low_usage)
# Check if embedding contains restarts
for i, emb in enumerate(codebook.embedding):
# We used only emb vector with index = 1, so check it was not restarted
if i == 1:
assert_expected(
emb,
codebook.code_avg[1] / codebook.code_usage[1],
rtol=0,
atol=1e-4,
)
            # Compare each embedding vector to every encoded vector.
            # If at least one matches, a restart happened.
else:
assert any(
[
torch.isclose(emb, enc, rtol=0, atol=1e-4).all()
for enc in encoded_flat_noise
]
), "embedding restarted from encoder output incorrectly"
def test_load_state_dict(self):
state_dict = OrderedDict(
[
("linear.weight", tensor([[1.0]])),
("linear.bias", tensor([2.0])),
("codebook.embedding", tensor([[3.0]])),
("codebook.code_usage", tensor([4.0])),
("codebook.code_avg", tensor([[5.0]])),
]
)
class DummyModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 1)
self.codebook = Codebook(1, 1)
model = DummyModel()
assert not model.codebook._is_embedding_init
model.load_state_dict(state_dict)
assert model.codebook._is_embedding_init
actual = model.codebook.embedding
expected = state_dict["codebook.embedding"]
assert_expected(actual, expected)
actual = model.codebook.code_usage
expected = state_dict["codebook.code_usage"]
assert_expected(actual, expected)
actual = model.codebook.code_avg
expected = state_dict["codebook.code_avg"]
assert_expected(actual, expected)
def test_lookup(self, codebook, embedding_weights):
codebook.embedding = embedding_weights
indices_flat = tensor([[0, 1]]) # (b, seq_len)
indices_shaped = tensor([[[0, 1], [2, 3]]]) # (b, shape)
actual_quantized_flat = codebook.lookup(indices_flat)
actual_quantized = codebook.lookup(indices_shaped)
expected_quantized_flat = tensor(
[[[1.0, 0.0, -1.0, -1.0, 2.0], [2.0, -2.0, 0.0, 0.0, 1.0]]]
)
expected_quantized = tensor(
[
[
[[1.0, 0.0, -1.0, -1.0, 2.0], [2.0, -2.0, 0.0, 0.0, 1.0]],
[[2.0, 1.0, 0.0, 1.0, 1.0], [-1.0, -2.0, 0.0, 2.0, 0.0]],
]
]
)
assert_expected(
actual_quantized_flat, expected_quantized_flat, rtol=0.0, atol=1e-4
)
assert_expected(actual_quantized, expected_quantized, rtol=0.0, atol=1e-4)
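# For reference, the nearest-neighbour quantization step that the codebook
# indices above reflect can be written directly: squared Euclidean distances from
# each flattened encoder vector to every codebook vector, argmin, then an index
# lookup. This mirrors the distance computation in test_ema_update_embedding and
# is an illustrative sketch, not the Codebook module itself.
def quantize_sketch(encoded_flat, embedding):
    # encoded_flat: (n, embedding_dim); embedding: (num_embeddings, embedding_dim)
    distances = torch.cdist(encoded_flat, embedding, p=2.0) ** 2
    codebook_indices = torch.argmin(distances, dim=1)
    quantized_flat = embedding[codebook_indices]
    return quantized_flat, codebook_indices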
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/layers/test_codebook.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from itertools import repeat
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.modules.layers.attention import (
AxialAttention,
AxialAttentionBlock,
merge_multihead,
MultiHeadAttention,
scaled_dot_product_attention,
SelfAttention,
split_multihead,
)
@pytest.fixture(autouse=True)
def set_seed():
set_rng_seed(4)
@pytest.fixture
def hidden_dim():
return 3
@pytest.fixture
def n_dim():
return 3
@pytest.fixture
def input_shape(n_dim):
return tuple(repeat(2, n_dim))
@pytest.fixture
def q(input_shape, hidden_dim):
n_heads = 1
return torch.randn(1, n_heads, *input_shape, hidden_dim // n_heads)
@pytest.fixture
def kv(input_shape, hidden_dim):
n_heads = 1
return torch.randn(1, n_heads, *input_shape, hidden_dim // n_heads)
@pytest.fixture
def self_attn():
return SelfAttention(attn_dropout=0.0)
@pytest.fixture
def axial_attn():
return AxialAttention(1) # only on second axis of input
class TestMultiheadAttention:
@pytest.fixture
def multihead_attn(self, hidden_dim):
def create_multihead_attn(n_heads, attn_module):
return MultiHeadAttention(hidden_dim, hidden_dim, n_heads, attn_module)
return create_multihead_attn
def test_multi_head_self_attention(
self,
input_shape,
hidden_dim,
multihead_attn,
self_attn,
):
mha = multihead_attn(1, self_attn)
qkv = 2 * torch.ones(1, *input_shape, hidden_dim)
actual = mha(qkv)
expected = torch.tensor(
[
[
[
[
[1.069666, 1.304498, -0.016060],
[1.069666, 1.304498, -0.016060],
],
[
[1.069666, 1.304498, -0.016060],
[1.069666, 1.304498, -0.016060],
],
],
[
[
[1.069666, 1.304498, -0.016060],
[1.069666, 1.304498, -0.016060],
],
[
[1.069666, 1.304498, -0.016060],
[1.069666, 1.304498, -0.016060],
],
],
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_multi_head_cross_attention(
self,
input_shape,
hidden_dim,
multihead_attn,
self_attn,
):
mha = multihead_attn(1, self_attn)
q = 2 * torch.ones(1, *input_shape, hidden_dim)
kv = torch.ones(1, *input_shape, hidden_dim)
actual = mha(q, kv)
expected = torch.tensor(
[
[
[
[[0.7675, 0.8126, -0.1126], [0.7675, 0.8126, -0.1126]],
[[0.7675, 0.8126, -0.1126], [0.7675, 0.8126, -0.1126]],
],
[
[[0.7675, 0.8126, -0.1126], [0.7675, 0.8126, -0.1126]],
[[0.7675, 0.8126, -0.1126], [0.7675, 0.8126, -0.1126]],
],
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_multi_head_attention_use_cache(
self, input_shape, hidden_dim, multihead_attn, self_attn, mocker
):
mha = multihead_attn(1, self_attn)
mock_projection_k = mocker.patch.object(
mha.key, "forward", wraps=mha.key.forward
)
mock_projection_v = mocker.patch.object(
mha.value, "forward", wraps=mha.value.forward
)
q = 2 * torch.ones(1, *input_shape, hidden_dim)
kv = 2 * torch.ones(1, *input_shape, hidden_dim)
expected = torch.tensor(
[
[
[
[[1.0697, 1.3045, -0.0161], [1.0697, 1.3045, -0.0161]],
[[1.0697, 1.3045, -0.0161], [1.0697, 1.3045, -0.0161]],
],
[
[[1.0697, 1.3045, -0.0161], [1.0697, 1.3045, -0.0161]],
[[1.0697, 1.3045, -0.0161], [1.0697, 1.3045, -0.0161]],
],
]
]
)
# cached k, v are linearly projected and split-headed: (b, n_heads, d1, ..., dn, emb_dim)
expected_k = torch.tensor(
[
[
[
[
[
[0.935526, 0.753922, -1.434496],
[0.935526, 0.753922, -1.434496],
],
[
[0.935526, 0.753922, -1.434496],
[0.935526, 0.753922, -1.434496],
],
],
[
[
[0.935526, 0.753922, -1.434496],
[0.935526, 0.753922, -1.434496],
],
[
[0.935526, 0.753922, -1.434496],
[0.935526, 0.753922, -1.434496],
],
],
]
]
]
)
expected_v = torch.tensor(
[
[
[
[
[[2.0164, 1.4426, 1.0050], [2.0164, 1.4426, 1.0050]],
[[2.0164, 1.4426, 1.0050], [2.0164, 1.4426, 1.0050]],
],
[
[[2.0164, 1.4426, 1.0050], [2.0164, 1.4426, 1.0050]],
[[2.0164, 1.4426, 1.0050], [2.0164, 1.4426, 1.0050]],
],
]
]
]
)
# initially the cache should be empty
assert not mha.cache
for i in range(2):
            # perturb the input k, v but cache only once
actual = mha(q, kv + i, use_cache=True)
assert_expected(mha.cache["k"], expected_k, rtol=0, atol=1e-4)
assert_expected(mha.cache["v"], expected_v, rtol=0, atol=1e-4)
assert_expected(actual, expected, rtol=0, atol=1e-4)
# test that k, v projection is skipped except for the first pass
mock_projection_k.assert_called_once()
mock_projection_v.assert_called_once()
def test_multi_head_attention_causal_use_cache(
self, input_shape, hidden_dim, multihead_attn, self_attn
):
n_heads = 1
mha = multihead_attn(n_heads, self_attn)
seq_len = torch.prod(torch.tensor(input_shape)).item()
q = 2 * torch.ones(1, *input_shape, hidden_dim).flatten(start_dim=1, end_dim=-2)
kv = 2 * torch.ones(1, *input_shape, hidden_dim).flatten(
start_dim=1, end_dim=-2
)
out = []
# initially the cache should be empty
assert not mha.cache
# decoding is step-wise along the sequence dim
for i in range(seq_len):
out.append(
mha(q[:, i : i + 1], kv[:, i : i + 1], use_cache=True, causal=True)
)
            # cached k, v are flattened and grow by one step along the sequence dim at each iteration
expected_kv_shape = torch.Size([1, n_heads, (i + 1), hidden_dim])
assert_expected(mha.cache["k"].shape, expected_kv_shape)
assert_expected(mha.cache["v"].shape, expected_kv_shape)
out = torch.cat(out, dim=1)
assert_expected(out.shape, torch.Size([1, seq_len, hidden_dim]))
class TestScaledDotProductAttention:
def test_scaled_dot_product_attention(self, q, kv):
output, weights = scaled_dot_product_attention(q, kv, kv)
actual = output
expected = torch.tensor(
[
[
[
[
[[-0.5862, 1.7955, 1.0711], [-0.2718, 1.2177, 1.4946]],
[[-0.0613, 0.1774, 0.4893], [0.6899, -0.0650, 0.2909]],
],
[
[[0.2950, 1.2029, 1.7035], [0.2735, 0.5582, 0.6797]],
[[-1.1558, 1.0143, 0.1598], [0.7875, 0.0928, -0.7952]],
],
],
],
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
actual = weights
expected = torch.tensor(
[
[
[
[
[[0.8797, 0.1203], [0.5595, 0.4405]],
[[0.0553, 0.9447], [0.4549, 0.5451]],
],
[
[[0.0419, 0.9581], [0.4391, 0.5609]],
[[0.0297, 0.9703], [0.7313, 0.2687]],
],
]
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_scaled_dot_product_attention_with_attention_mask(self, q, kv):
attn_shape = torch.Size([1, 1, 2, 2, 2, 2])
mask = torch.tensor([1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1]).view(
attn_shape
)
actual, _ = scaled_dot_product_attention(q, kv, kv, attention_mask=mask)
expected = torch.tensor(
[
[
[
[
[[-0.7042, 2.0126, 0.9120], [-0.2718, 1.2177, 1.4946]],
[[-0.1652, 0.2109, 0.5167], [1.7146, -0.3956, 0.0204]],
],
[
[[0.2950, 1.2029, 1.7035], [0.2973, 1.2710, 1.8117]],
[[1.5320, -0.2602, -1.1611], [0.7875, 0.0928, -0.7952]],
],
]
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_scaled_dot_product_attention_with_head_mask(self, q, kv):
attn_shape = torch.Size([1, 1, 2, 2, 2, 2])
mask = torch.tensor([1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1]).view(
attn_shape
)
actual, _ = scaled_dot_product_attention(q, kv, kv, head_mask=mask)
expected = torch.tensor(
[
[
[
[
[[-0.6195, 1.7705, 0.8023], [-0.2718, 1.2177, 1.4946]],
[[-0.1561, 0.1993, 0.4882], [0.7800, -0.1800, 0.0093]],
],
[
[[0.2950, 1.2029, 1.7035], [0.1668, 0.7129, 1.0162]],
[[0.0455, -0.0077, -0.0345], [0.7875, 0.0928, -0.7952]],
],
]
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_scaled_dot_product_attention_with_dropout(self, q, kv):
actual, _ = scaled_dot_product_attention(q, kv, kv, attn_dropout=0.3)
expected = torch.tensor(
[
[
[
[
[
[0.0000e00, 0.0000e00, 0.0000e00],
[-5.6284e-01, 1.6085e00, 7.2891e-01],
],
[
[1.3536e-01, -3.1232e-02, 1.6106e-03],
[9.8563e-01, -9.2847e-02, 4.1562e-01],
],
],
[
[
[4.2149e-01, 1.7184e00, 2.4336e00],
[2.3824e-01, 1.0184e00, 1.4517e00],
],
[
[-1.6511e00, 1.4490e00, 2.2828e-01],
[1.1250e00, 1.3256e-01, -1.1361e00],
],
],
]
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
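# The outputs checked in this class follow the standard scaled dot-product
# formula softmax(q @ k^T / sqrt(d_k)) @ v over the last two dimensions. A
# minimal reference version, ignoring masking and dropout, as a sketch:
import torch.nn.functional as F
def sdpa_reference(q, k, v):
    attn = torch.matmul(q, k.transpose(-2, -1)) / (q.shape[-1] ** 0.5)
    weights = F.softmax(attn, dim=-1)
    return torch.matmul(weights, v), weights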
def test_self_attention(self_attn, q, kv):
k = v = kv
actual, _ = self_attn(q, k, v)
    # The output of self-attention should be the same as scaled_dot_product_attention
    # since the input dims are flattened
expected = torch.tensor(
[
[
[
[
[[-0.4851, 1.2020, 0.7056], [0.3507, 0.3822, 0.2783]],
[[-0.8302, 1.1415, 0.4297], [-0.0969, 1.0956, 0.9591]],
],
[
[[-0.0698, 0.9357, 1.4559], [-0.7157, 1.3919, 0.5880]],
[[-0.0598, 1.1194, 1.5332], [0.5494, -0.0489, -0.4454]],
],
]
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_axial_attention(axial_attn, q, kv):
k = v = kv
actual, _ = axial_attn(q, k, v)
expected = torch.tensor(
[
[
[
[
[[-0.5869, 1.8958, 0.8688], [0.0299, 0.2098, 1.2741]],
[[-0.6662, 1.9747, 0.8980], [0.1002, 0.2094, 1.5472]],
],
[
[[0.5902, -0.3275, -0.8727], [-1.0557, 1.0791, 0.3916]],
[[0.6623, -0.3223, -0.8948], [-1.0755, 1.0763, 0.3708]],
],
]
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_split_multihead(input_shape):
x = torch.randn(1, *input_shape, 6) # (b, d1, ..., dn, c)
out = split_multihead(x, 2)
actual = torch.tensor(out.shape)
expected = torch.tensor((1, 2, *input_shape, 3)) # (b, h, d1, ..., dn, c // h)
assert_expected(actual, expected)
def test_merge_multihead(input_shape, hidden_dim, q):
out = merge_multihead(q)
actual = torch.tensor(out.shape)
expected = torch.tensor((1, *input_shape, hidden_dim))
assert_expected(actual, expected)
class TestAxialBlock:
@pytest.fixture
def axial_block(self, input_shape, hidden_dim):
return AxialAttentionBlock(len(input_shape), hidden_dim, 1)
def test_axial_block_forward(self, axial_block, hidden_dim, input_shape):
"""Test AxialAttentionBlock with sub-components"""
x = 2 * torch.ones(1, hidden_dim, *input_shape)
actual = axial_block(x)
expected = torch.tensor(
[
[
[
[[0.822055, 0.822055], [0.822055, 0.822055]],
[[0.822055, 0.822055], [0.822055, 0.822055]],
],
[
[[-0.767143, -0.767143], [-0.767143, -0.767143]],
[[-0.767143, -0.767143], [-0.767143, -0.767143]],
],
[
[[-0.916860, -0.916860], [-0.916860, -0.916860]],
[[-0.916860, -0.916860], [-0.916860, -0.916860]],
],
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_axial_block_channel_dim(self, axial_block, hidden_dim, input_shape):
"""Test dim check in forward of AxialAttentionBlock"""
x = torch.zeros(1, hidden_dim + 1, *input_shape)
with pytest.raises(ValueError):
_ = axial_block(x)
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/layers/test_attention.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torchmultimodal.modules.layers.text_embedding import BERTTextEmbeddings
@pytest.fixture(autouse=True)
def random():
set_rng_seed(4)
class TestTextEmbeddings:
@pytest.fixture
def text_embedding(self):
return BERTTextEmbeddings(hidden_size=3, vocab_size=3)
@pytest.fixture
def input_ids(self):
return torch.tensor([[1, 2], [0, 2]])
def test_forward(self, input_ids, text_embedding):
embs = text_embedding(input_ids)
actual = embs.shape
expected = torch.Size([2, 2, 3])
assert_expected(actual, expected)
def test_invalid_input(self, text_embedding):
with pytest.raises(ValueError):
_ = text_embedding()
def test_create_position_ids_from_input_ids(self, input_ids, text_embedding):
actual = text_embedding.create_position_ids_from_input_ids(input_ids)
expected = torch.tensor([[1, 2], [0, 1]])
assert_expected(actual, expected)
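# The expected position ids above are consistent with the RoBERTa-style scheme in
# which padding tokens (id 0 here) keep the padding position and non-padding
# tokens are numbered by their cumulative count. A sketch of that rule, assuming
# a padding index of 0:
_input_ids = torch.tensor([[1, 2], [0, 2]])
_pad_idx = 0
_mask = _input_ids.ne(_pad_idx).long()
assert torch.equal(torch.cumsum(_mask, dim=1) * _mask + _pad_idx, torch.tensor([[1, 2], [0, 1]]))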
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/layers/test_text_embedding.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torch import nn
from torchmultimodal.modules.layers.transformer import (
TransformerCrossAttentionLayer,
TransformerEncoder,
TransformerEncoderLayer,
)
@pytest.fixture(autouse=True)
def random():
set_rng_seed(4)
class TestTransformerEncoderLayer:
@pytest.fixture
def get_encoder_layer(self):
def create_layer(norm_first):
model = TransformerEncoderLayer(2, 1, 2, norm_first=norm_first)
model.eval()
return model
return create_layer
@pytest.fixture
def inputs(self):
return torch.randn(1, 2, 2, 2, 2)
def test_forward_prenorm(self, inputs, get_encoder_layer):
model = get_encoder_layer(True)
actual = model(inputs)
expected = torch.tensor(
[
[
[
[[-1.5605, 2.3367], [-0.8028, 1.2239]],
[[-0.3491, 0.7343], [-3.2212, 1.6979]],
],
[
[[-1.4874, 0.8684], [-0.2215, 1.7433]],
[[-0.6728, 1.1201], [-2.2237, -1.1081]],
],
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_forward_postnorm(self, inputs, get_encoder_layer):
model = get_encoder_layer(False)
actual = model(inputs)
expected = torch.tensor(
[
[
[
[[-1.0000, 1.0000], [-1.0000, 1.0000]],
[[-1.0000, 1.0000], [-1.0000, 1.0000]],
],
[
[[-1.0000, 1.0000], [-1.0000, 1.0000]],
[[-1.0000, 1.0000], [-1.0000, 1.0000]],
],
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
class TestTransformerCrossAttentionLayer:
@pytest.fixture
def get_encoder_layer(self):
def create_layer(norm_first):
model = TransformerCrossAttentionLayer(2, 1, 2, norm_first=norm_first)
model.eval()
return model
return create_layer
@pytest.fixture
def inputs(self):
return torch.randn(1, 2, 2, 2, 2)
@pytest.fixture
def cross_inputs(self):
return torch.randn(1, 2, 2, 2, 2)
def test_forward_prenorm(self, inputs, cross_inputs, get_encoder_layer):
model = get_encoder_layer(True)
actual = model(inputs, cross_inputs)
expected = torch.tensor(
[
[
[
[[-0.5925, 1.1257], [-0.5925, 1.1257]],
[[-0.5925, 1.1257], [-0.5925, 1.1257]],
],
[
[[-0.5925, 1.1257], [-0.5925, 1.1257]],
[[-0.5925, 1.1257], [-0.5925, 1.1257]],
],
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_forward_postnorm(self, inputs, cross_inputs, get_encoder_layer):
model = get_encoder_layer(False)
actual = model(inputs, cross_inputs)
expected = torch.tensor(
[
[
[[[-1.0, 1.0], [-1.0, 1.0]], [[-1.0, 1.0], [-1.0, 1.0]]],
[[[-1.0, 1.0], [-1.0, 1.0]], [[-1.0, 1.0], [-1.0, 1.0]]],
]
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
class TestTransformerEncoder:
@pytest.fixture
def encoder(self):
model = TransformerEncoder(
n_layer=2,
d_model=2,
n_head=2,
dim_feedforward=3072,
activation=nn.GELU,
norm_first=True,
)
model.eval()
return model
@pytest.fixture
def encoder_ln(self):
model = TransformerEncoder(
n_layer=2,
d_model=4,
n_head=2,
dim_feedforward=3072,
activation=nn.GELU,
norm_first=True,
final_layer_norm_eps=1e-12,
)
model.eval()
return model
@pytest.fixture
def inputs(self):
return torch.rand((2, 3, 2))
@pytest.fixture
def inputs_ln(self):
return torch.rand((2, 3, 4))
def test_forward(self, inputs, encoder):
output = encoder(inputs, return_hidden_states=True, return_attn_weights=True)
actual_last_hidden_state = output.last_hidden_state
actual_hidden_states = torch.sum(torch.stack(output.hidden_states), dim=0)
actual_attentions = torch.sum(torch.stack(output.attentions), dim=0)
expected_last_hidden_state = torch.Tensor(
[
[[1.6669, 0.3613], [1.0610, 0.0896], [0.9768, -0.0814]],
[[2.3306, 0.6623], [1.8439, 0.7909], [1.6566, -0.0360]],
]
)
expected_hidden_states = torch.Tensor(
[
[[3.4371, 0.9657], [1.7571, 0.0734], [1.5043, -0.4397]],
[[5.1976, 1.9218], [3.8499, 2.2402], [3.1757, -0.1730]],
]
)
expected_attentions = torch.Tensor(
[
[
[
[0.8520, 0.5740, 0.5740],
[0.6232, 0.6884, 0.6884],
[0.6232, 0.6884, 0.6884],
],
[
[0.5859, 0.7071, 0.7071],
[0.6515, 0.6742, 0.6742],
[0.6515, 0.6742, 0.6742],
],
],
[
[
[0.7392, 0.5216, 0.7392],
[0.6434, 0.7132, 0.6434],
[0.7392, 0.5216, 0.7392],
],
[
[0.6207, 0.7586, 0.6207],
[0.6589, 0.6822, 0.6589],
[0.6207, 0.7586, 0.6207],
],
],
]
)
assert_expected(
actual_last_hidden_state, expected_last_hidden_state, rtol=0.0, atol=1e-4
)
assert_expected(
actual_hidden_states, expected_hidden_states, rtol=0.0, atol=1e-4
)
assert_expected(actual_attentions, expected_attentions, rtol=0.0, atol=1e-4)
# set flags to false
output = encoder(inputs)
actual_last_hidden_state = output.last_hidden_state
assert_expected(
actual_last_hidden_state, expected_last_hidden_state, rtol=0.0, atol=1e-4
)
def test_forward_ln(self, inputs_ln, encoder_ln):
output = encoder_ln(
inputs_ln, return_hidden_states=True, return_attn_weights=True
)
actual_last_hidden_state = output.last_hidden_state
actual_hidden_states = torch.sum(torch.stack(output.hidden_states), dim=0)
actual_attentions = torch.sum(torch.stack(output.attentions), dim=0)
expected_last_hidden_state = torch.Tensor(
[
[
[0.0670, -1.5311, 1.2704, 0.1937],
[-0.7999, -0.9243, 1.5761, 0.1481],
[0.1925, -0.6254, 1.5352, -1.1022],
],
[
[0.6513, -1.6111, -0.0297, 0.9895],
[0.1316, -1.1576, 1.5417, -0.5156],
[-0.3600, -1.4460, 0.6302, 1.1758],
],
]
)
expected_hidden_states = torch.Tensor(
[
[
[1.4894, 1.2685, 1.7669, 0.8038],
[0.1563, 0.3721, 4.3070, 2.4121],
[1.6380, 2.0771, 2.3102, 0.4584],
],
[
[2.8866, 2.0093, 2.8522, 3.0838],
[1.8855, 1.0953, 2.5921, 0.6673],
[1.8191, 1.5908, 2.8085, 2.3234],
],
]
)
expected_attentions = torch.Tensor(
[
[
[
[0.6653, 0.6376, 0.6971],
[0.7078, 0.5621, 0.7302],
[0.6506, 0.6943, 0.6551],
],
[
[0.6333, 0.7897, 0.5770],
[0.7207, 0.7019, 0.5774],
[0.7285, 0.7195, 0.5520],
],
],
[
[
[0.6919, 0.7021, 0.6060],
[0.6274, 0.7462, 0.6264],
[0.7025, 0.7090, 0.5885],
],
[
[0.5826, 0.6227, 0.7947],
[0.6855, 0.6174, 0.6971],
[0.7317, 0.6057, 0.6625],
],
],
]
)
assert_expected(
actual_last_hidden_state, expected_last_hidden_state, rtol=0.0, atol=1e-4
)
assert_expected(
actual_hidden_states, expected_hidden_states, rtol=0.0, atol=1e-4
)
assert_expected(actual_attentions, expected_attentions, rtol=0.0, atol=1e-4)
# set flags to false
output = encoder_ln(inputs_ln)
actual_last_hidden_state = output.last_hidden_state
assert_expected(
actual_last_hidden_state, expected_last_hidden_state, rtol=0.0, atol=1e-4
)
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/layers/test_transformer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import warnings
from itertools import product
import torch
from tests.test_utils import assert_expected
from torchmultimodal.modules.layers.conv import (
calculate_same_padding,
calculate_transpose_padding,
SamePadConv3d,
SamePadConvTranspose3d,
)
class TestSamePadConv3d(unittest.TestCase):
"""
Test the SamePadConv3d class and associated helpers
"""
def setUp(self):
inputs = [torch.ones(1, 1, 8, 8, 8), torch.ones(1, 1, 7, 7, 7)]
kernels = [(4, 4, 4), (3, 3, 3)]
strides = [(2, 2, 2), (3, 3, 3)]
self.test_cases = list(product(*[inputs, kernels, strides]))
self.pad_expected = [
(1, 1, 1, 1, 1, 1),
(1, 1, 1, 1, 1, 1),
(1, 0, 1, 0, 1, 0),
(1, 0, 1, 0, 1, 0),
(2, 1, 2, 1, 2, 1),
(2, 1, 2, 1, 2, 1),
(1, 1, 1, 1, 1, 1),
(1, 1, 1, 1, 1, 1),
]
self.out_shape_conv_expected = [
torch.tensor([1, 1, 4, 4, 4]),
torch.tensor([1, 1, 3, 3, 3]),
torch.tensor([1, 1, 4, 4, 4]),
torch.tensor([1, 1, 3, 3, 3]),
torch.tensor([1, 1, 4, 4, 4]),
torch.tensor([1, 1, 3, 3, 3]),
torch.tensor([1, 1, 4, 4, 4]),
torch.tensor([1, 1, 3, 3, 3]),
]
self.out_shape_convtranspose_expected = [
torch.tensor([1, 1, 16, 16, 16]),
torch.tensor([1, 1, 24, 24, 24]),
torch.tensor([1, 1, 16, 16, 16]),
torch.tensor([1, 1, 24, 24, 24]),
torch.tensor([1, 1, 14, 14, 14]),
torch.tensor([1, 1, 21, 21, 21]),
torch.tensor([1, 1, 14, 14, 14]),
torch.tensor([1, 1, 21, 21, 21]),
]
self.transpose_pad_expected = [
(3, 3, 3),
(4, 4, 4),
(2, 2, 2),
(2, 2, 2),
(4, 4, 4),
(5, 5, 5),
(3, 3, 3),
(3, 3, 3),
]
self.output_pad_expected = [
(0, 0, 0),
(1, 1, 1),
(1, 1, 1),
(1, 1, 1),
(0, 0, 0),
(0, 0, 0),
(1, 1, 1),
(0, 0, 0),
]
    def test_calculate_same_padding_assert(self):
        with self.assertRaises(ValueError):
            _ = calculate_same_padding((3, 3), (2, 2, 2), (5, 5))
        with self.assertRaises(ValueError):
            _ = calculate_same_padding(3, (2, 2), (5, 5, 5))
def test_calculate_same_padding_output(self):
for i, (inp, kernel, stride) in enumerate(self.test_cases):
pad_actual = calculate_same_padding(kernel, stride, inp.shape[2:])
self.assertEqual(
pad_actual,
self.pad_expected[i],
f"padding incorrect for shape {inp.shape}, kernel {kernel}, stride {stride}",
)
def test_samepadconv3d_forward(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
for i, (inp, kernel, stride) in enumerate(self.test_cases):
conv = SamePadConv3d(1, 1, kernel, stride, padding=0)
out = conv(inp)
out_shape_conv_actual = torch.tensor(out.shape)
assert_expected(out_shape_conv_actual, self.out_shape_conv_expected[i])
    def test_calculate_transpose_padding_assert(self):
        with self.assertRaises(ValueError):
            _ = calculate_transpose_padding((3, 3), (2, 2, 2), (5, 5))
        with self.assertRaises(ValueError):
            _ = calculate_transpose_padding(3, (2, 2), (5, 5, 5))
        with self.assertRaises(ValueError):
            _ = calculate_transpose_padding((3, 3), (2, 2), (5, 5), (1, 0, 1))
        with self.assertRaises(ValueError):
            _ = calculate_transpose_padding(3, 2, (5, 5, 5), (1, 1, 1, 1, 1, 1, 1))
def test_calculate_transpose_padding_output(self):
for i, (inp, kernel, stride) in enumerate(self.test_cases):
pad = calculate_same_padding(kernel, stride, inp.shape[2:])
transpose_pad_actual, output_pad_actual = calculate_transpose_padding(
kernel, stride, inp.shape[2:], pad
)
self.assertEqual(
transpose_pad_actual,
self.transpose_pad_expected[i],
f"transpose padding incorrect for shape {inp.shape}, kernel {kernel}, stride {stride}",
)
self.assertEqual(
output_pad_actual,
self.output_pad_expected[i],
f"output padding incorrect for shape {inp.shape}, kernel {kernel}, stride {stride}",
)
def test_samepadconvtranspose3d_forward(self):
for i, (inp, kernel, stride) in enumerate(self.test_cases):
conv = SamePadConvTranspose3d(1, 1, kernel, stride)
out = conv(inp)
out_shape_convtranspose_actual = torch.tensor(out.shape)
assert_expected(
out_shape_convtranspose_actual, self.out_shape_convtranspose_expected[i]
)
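# The "same" padding amounts above follow the usual rule: for input size n,
# kernel k and stride s, the output size is ceil(n / s) and the total padding
# along that dimension is max((ceil(n / s) - 1) * s + k - n, 0), split across the
# two sides. A worked check of that arithmetic for two of the cases (assumption:
# the helper splits the total as shown in pad_expected, larger half first):
import math
def total_same_padding(n, k, s):
    return max((math.ceil(n / s) - 1) * s + k - n, 0)
assert total_same_padding(8, 4, 2) == 2  # split as (1, 1) per dimension
assert total_same_padding(7, 4, 2) == 3  # split as (2, 1) per dimension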
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/layers/test_conv.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torch import nn
from torchmultimodal.modules.encoders.mil_encoder import MILEncoder
from torchmultimodal.modules.layers.mlp import MLP
class DummyEncoder(nn.Module):
def __init__(self):
super().__init__()
self.transformer = nn.TransformerEncoder(
encoder_layer=nn.TransformerEncoderLayer(8, 2, batch_first=True),
num_layers=1,
norm=nn.LayerNorm(8),
)
def forward(self, x):
return self.transformer(x)[:, 0, :]
class TestMILEncoder(unittest.TestCase):
def setUp(self):
self.batch_size = 2
dim = 50
self.input = torch.rand((self.batch_size, dim))
self.input_bsz_1 = torch.rand(1, dim)
self.partition_size = 10
self.mlp_out_dim = 5
self.shared_enc_dim = 8
self.shared_encoder = nn.Linear(self.partition_size, self.shared_enc_dim)
self.mlp = MLP(in_dim=self.shared_enc_dim, out_dim=self.mlp_out_dim)
self.shared_test_encoder = DummyEncoder()
self.transformer = nn.TransformerEncoder(
encoder_layer=nn.TransformerEncoderLayer(
self.shared_enc_dim, 2, batch_first=True
),
num_layers=1,
norm=nn.LayerNorm(self.shared_enc_dim),
)
def test_forward(self):
partition_sizes = [self.partition_size] * 5
mil_encoder = MILEncoder(
partition_sizes,
self.shared_encoder,
self.shared_enc_dim,
self.mlp,
torch.sum,
)
out = mil_encoder(self.input)
self.assertEqual(out.size(), (self.batch_size, self.mlp_out_dim))
out = mil_encoder(self.input_bsz_1)
self.assertEqual(out.size(), (1, self.mlp_out_dim))
def test_transformer_pooling(self):
partition_sizes = [2, 1]
mil_encoder = MILEncoder(
partition_sizes,
self.shared_test_encoder,
8,
MLP(in_dim=self.shared_enc_dim, out_dim=self.mlp_out_dim),
self.transformer,
)
input = torch.rand(self.batch_size, 3, 8)
out = mil_encoder(input)
self.assertEqual(out.size(), (self.batch_size, self.mlp_out_dim))
def test_scripting(self):
partition_sizes = [self.partition_size] * 5
mil_encoder = MILEncoder(
partition_sizes,
self.shared_encoder,
self.shared_enc_dim,
self.mlp,
torch.sum,
)
scripted_encoder = torch.jit.script(mil_encoder)
scripted_encoder(self.input)
def test_invalid_partitioning(self):
partition_sizes = [12] * 5
mil_encoder = MILEncoder(
partition_sizes,
self.shared_encoder,
self.shared_enc_dim,
self.mlp,
torch.sum,
)
with self.assertRaises(ValueError):
mil_encoder(self.input)
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/encoders/test_mil_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from tests.test_utils import assert_expected
from torch import nn
from torchmultimodal.modules.encoders.weighted_embedding_encoder import (
WeightedEmbeddingEncoder,
)
class TestEmbeddingEncoder(unittest.TestCase):
def setUp(self):
embedding_weights = torch.Tensor(
[
[1, 1],
[2, 2],
[1, 0],
]
)
self.embedding = nn.Embedding.from_pretrained(embedding_weights)
def test_forward_sum_pooling(self):
input = torch.Tensor(
[
[0.25, 0.75, 0],
[0.6, 0, 0.4],
]
)
weighted_embedding_encoder = WeightedEmbeddingEncoder(
embedding=self.embedding, pooling_function=torch.sum
)
actual = weighted_embedding_encoder(input)
expected = torch.Tensor(
[
[1.75, 1.75],
[1.0, 0.6],
]
)
assert_expected(actual, expected)
def test_forward_mean_pooling(self):
input = torch.Tensor(
[
[0.25, 0.75, 0],
[0.6, 0, 0.4],
]
)
weighted_embedding_encoder = WeightedEmbeddingEncoder(
embedding=self.embedding, pooling_function=torch.mean
)
actual = weighted_embedding_encoder(input)
expected = torch.Tensor(
[
[1.75 / 3, 1.75 / 3],
[1.0 / 3, 0.2],
]
)
assert_expected(actual, expected)
def test_forward_max_pooling(self):
input = torch.Tensor(
[
[0.25, 0.75, 0],
[0.6, 0, 0.4],
]
)
weighted_embedding_encoder = WeightedEmbeddingEncoder(
embedding=self.embedding, pooling_function=torch.max
)
actual = weighted_embedding_encoder(input)
expected = torch.Tensor(
[
[1.5, 1.5],
[0.6, 0.6],
]
)
assert_expected(actual, expected)
def test_scripting(self):
input = torch.Tensor(
[
[0.25, 0.75, 0],
[0.6, 0, 0.4],
]
)
weighted_embedding_encoder = WeightedEmbeddingEncoder(
embedding=self.embedding,
pooling_function=torch.mean,
)
scripted_encoder = torch.jit.script(weighted_embedding_encoder)
actual = scripted_encoder(input)
expected = torch.Tensor(
[
[1.75 / 3, 1.75 / 3],
[1.0 / 3, 0.2],
]
)
assert_expected(actual, expected)
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/encoders/test_weighted_embedding_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from tests.test_utils import set_rng_seed
from torchmultimodal.modules.encoders.swin_transformer_3d_encoder import (
SwinTransformer3d,
)
from torchmultimodal.utils.common import get_current_device
from torchvision.models.video.swin_transformer import PatchEmbed3d
class TestSwinTransformer3d(unittest.TestCase):
def setUp(self):
set_rng_seed(42)
self.device = get_current_device()
# Setup Encoder to test
self.encoder = SwinTransformer3d(
patch_size=[2, 4, 4],
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=[8, 7, 7],
stochastic_depth_prob=0.2,
norm_layer=torch.nn.LayerNorm,
patch_embed=PatchEmbed3d,
num_classes=None,
).to(self.device)
def test_swin_transformer_3d_encoder(self):
image = torch.randn(1, 3, 1, 112, 112) # B C D H W
scores = self.encoder(image)
self.assertEqual(scores.size(), torch.Size([1, 768]))
self.assertAlmostEqual(scores.abs().sum().item(), 247.14674, 2)
def test_swin_transformer_3d_scripting(self):
torch.jit.script(self.encoder)
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/encoders/test_swin_transformer_3d_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/encoders/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from tests.test_utils import assert_expected, set_rng_seed
from torch import Tensor
from torchmultimodal.modules.encoders.bert_text_encoder import bert_text_encoder
@pytest.fixture(autouse=True)
def random():
set_rng_seed(4)
class TestBERTTextEncoder:
@pytest.fixture
def encoder(self):
return bert_text_encoder(hidden_size=3, num_attention_heads=1, dropout=0.0)
def test_forward(self, encoder):
input_ids = torch.randint(10, (2, 2))
text_atts = Tensor([[1, 1], [1, 0]])
output = encoder(input_ids, text_atts)
expected = Tensor(
[
[[-0.658658, -0.754473, 1.413131], [-0.501156, -0.894687, 1.395843]],
[[-0.148285, -1.143851, 1.292136], [0.424911, -1.380611, 0.955700]],
]
)
assert_expected(output.last_hidden_state, expected, rtol=0, atol=1e-4)
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/encoders/test_bert_text_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from tests.test_utils import assert_expected
from torch import nn
from torchmultimodal.modules.encoders.embedding_encoder import EmbeddingEncoder
class TestEmbeddingEncoder(unittest.TestCase):
def setUp(self):
self.num_embeddings = 4
self.embedding_dim = 4
self.batch_size = 2
self.input_size = 6
embedding_weight = torch.Tensor([[4, 3, 2, 5], [2, 5, 6, 7], [1, 2, 0, 1]])
self.embedding = nn.Embedding.from_pretrained(embedding_weight)
self.data = torch.LongTensor([[1, 2], [0, 1]])
def test_embedding_encoder_sum(self):
encoder = EmbeddingEncoder(self.embedding, "sum")
actual = encoder(self.data)
expected = torch.FloatTensor([[3, 7, 6, 8], [6, 8, 8, 12]])
assert_expected(actual, expected)
def test_embedding_encoder_mean(self):
encoder = EmbeddingEncoder(self.embedding, "mean")
actual = encoder(self.data)
expected = torch.FloatTensor([[1.5, 3.5, 3, 4], [3, 4, 4, 6]])
assert_expected(actual, expected)
def test_embedding_encoder_max(self):
encoder = EmbeddingEncoder(self.embedding, "max")
actual = encoder(self.data)
expected = torch.FloatTensor([[2, 5, 6, 7], [4, 5, 6, 7]])
assert_expected(actual, expected)
def test_embedding_encoder_hash(self):
encoder = EmbeddingEncoder(self.embedding, "sum", use_hash=True)
data = torch.LongTensor([[1, 2], [7, 9]])
actual = encoder(data)
expected = torch.FloatTensor([[3, 7, 6, 8], [2, 4, 0, 2]])
assert_expected(actual, expected)
def test_embedding_encoder_invalid_pooling(self):
with self.assertRaises(ValueError):
EmbeddingEncoder(self.embedding, "random")
|
EXA-1-master
|
exa/libraries/multimodal-main/tests/modules/encoders/test_embedding_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
class LSTMEncoder(nn.Module):
"""An LSTM encoder. Stacks an LSTM on an embedding layer.
Args:
vocab_size (int): The size of the vocab for embeddings.
embedding_dim (int): The size of each embedding vector.
input_size (int): The number of features in the LSTM input.
hidden_size (int): The number of features in the hidden state.
bidirectional (bool): Whether to use bidirectional LSTM.
batch_first (bool): Whether to provide batches as (batch, seq, feature)
or (seq, batch, feature).
Inputs:
x (Tensor): Tensor containing a batch of input sequences.
"""
def __init__(
self,
vocab_size: int,
embedding_dim: int,
input_size: int,
hidden_size: int,
bidirectional: bool,
batch_first: bool,
):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(
input_size=input_size,
hidden_size=hidden_size,
bidirectional=bidirectional,
batch_first=batch_first,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
_, x = self.lstm(self.embedding(x))
# N x B x H => B x N x H where N = num_layers * num_directions
x = x[0].transpose(0, 1)
# N should be 2 so we can merge in that dimension
assert x.size(1) == 2, "hidden state (final) should have 1st dim as 2"
x = torch.cat([x[:, 0, :], x[:, 1, :]], dim=-1)
return x
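# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the library). With a
# bidirectional LSTM the two final hidden states are concatenated, so the
# returned feature size is 2 * hidden_size.
if __name__ == "__main__":
    encoder = LSTMEncoder(
        vocab_size=80,
        embedding_dim=20,
        input_size=20,
        hidden_size=50,
        bidirectional=True,
        batch_first=True,
    )
    tokens = torch.randint(0, 80, (1, 10), dtype=torch.long)  # (batch, seq_len)
    features = encoder(tokens)
    print(features.shape)  # expected: torch.Size([1, 100])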
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/cnn_lstm/lstm_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
from examples.cnn_lstm.cnn_encoder import CNNEncoder
from examples.cnn_lstm.lstm_encoder import LSTMEncoder
from torch import nn
from torchmultimodal.models.late_fusion import LateFusion
from torchmultimodal.modules.fusions.concat_fusion import ConcatFusionModule
from torchmultimodal.modules.layers.mlp import MLP
DEFAULT_CNN_INPUT_DIMS = [3, 64, 128, 128, 64, 64]
DEFAULT_CNN_OUTPUT_DIMS = DEFAULT_CNN_INPUT_DIMS[1:] + [10]
DEFAULT_CNN_KERNEL_SIZES = [7, 5, 5, 5, 5, 1]
def cnn_lstm_classifier(
# Parameters for encoding the text
text_vocab_size: int,
text_embedding_dim: int = 20,
lstm_input_size: int = 20,
lstm_hidden_dim: int = 50,
lstm_bidirectional: bool = True,
lstm_batch_first: bool = True,
# parameters for encoding the image
cnn_input_dims: List[int] = DEFAULT_CNN_INPUT_DIMS,
cnn_output_dims: List[int] = DEFAULT_CNN_OUTPUT_DIMS,
cnn_kernel_sizes: List[int] = DEFAULT_CNN_KERNEL_SIZES,
# parameters for the classifier
classifier_in_dim: int = 450,
num_classes: int = 2,
) -> LateFusion:
"""
A simple example showing the composability of TorchMultimodal and how
builder functions can be used to construct a model for a given
architecture. A builder function takes in all of the parameters needed to
build the individual layers and simplifies the interface to the
architecture. In this example, we work explicitly with the "text" and
"image" modalities, which is reflected in the ModuleDict passed to
LateFusion's init function. Note that these keys must match the keys of
the dict passed to the forward function; a mismatch raises an error.
We use LateFusion to build a multimodal classifier that encodes images
with a CNN, encodes text with an LSTM, and classifies with a simple MLP.
The output is raw (unnormalized) scores; see the usage sketch after this
function for an example call.
Args:
text_vocab_size (int): The vocab size for text data.
text_embedding_dim (int): The size of each text embedding vector.
lstm_input_size (int): Number of expected features in LSTM input.
lstm_hidden_dim (int): Number of features in the LSTM hidden state.
lstm_bidirectional (bool): Whether to use a bidirectional LSTM.
lstm_batch_first (bool): Whether to provide LSTM batches as
(batch, seq, feature) or (seq, batch, feature).
cnn_input_dims (List[int]): Input dimensions for CNN layers.
cnn_output_dims (List[int]): Output dimensions for CNN layers.
Should match input dimensions offset by one.
cnn_kernel_sizes (List[int]): Kernel sizes for CNN convolutions.
Should match the sizes of cnn_input_dims and cnn_output_dims.
classifier_in_dim (int): Input dimension for classifier.
Should equal output_dim for CNN + output_dim for LSTM (flattened).
num_classes (int): Number of classes predicted by classifier.
"""
image_encoder = CNNEncoder(
input_dims=cnn_input_dims,
output_dims=cnn_output_dims,
kernel_sizes=cnn_kernel_sizes,
)
text_encoder = LSTMEncoder(
vocab_size=text_vocab_size,
embedding_dim=text_embedding_dim,
input_size=lstm_input_size,
hidden_size=lstm_hidden_dim,
bidirectional=lstm_bidirectional,
batch_first=lstm_batch_first,
)
fusion_module = ConcatFusionModule()
# Notice the output of the classifier is raw scores
classifier = MLP(
classifier_in_dim,
num_classes,
activation=nn.ReLU,
normalization=nn.BatchNorm1d,
)
# The use of builder functions allows us to keep the architecture
# interfaces clean and intuitive
return LateFusion(
encoders=nn.ModuleDict({"image": image_encoder, "text": text_encoder}),
fusion_module=fusion_module,
head_module=classifier,
)
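# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). The LateFusion model expects a
# dict whose keys match the encoder ModuleDict ("image" and "text"). The
# default classifier_in_dim of 450 corresponds to a 3 x 320 x 480 image with
# the default CNN dims (350 flattened features) plus the bidirectional LSTM
# output (2 * 50 = 100), as exercised in the accompanying test.
if __name__ == "__main__":
    import torch

    model = cnn_lstm_classifier(text_vocab_size=80)
    image = torch.randn(1, 3, 320, 480)
    text = torch.randint(1, 79, (1, 10), dtype=torch.long)
    scores = model({"image": image, "text": text})
    print(scores.shape)  # expected: torch.Size([1, 2]) raw class scores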
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/cnn_lstm/cnn_lstm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
from torch import nn
class CNNEncoder(nn.Module):
"""A CNN encoder.
Stacks n layers of (Conv2d, MaxPool2d, BatchNorm2d), where n is determined
by the length of the input args.
Args:
input_dims (List[int]): List of input dimensions.
output_dims (List[int]): List of output dimensions. Should match
input_dims offset by one.
kernel_sizes (List[int]): Kernel sizes for convolutions. Should match
the sizes of input_dims and output_dims.
Inputs:
x (Tensor): Tensor containing a batch of images.
"""
def __init__(
self, input_dims: List[int], output_dims: List[int], kernel_sizes: List[int]
):
super().__init__()
conv_layers: List[nn.Module] = []
assert len(input_dims) == len(output_dims) and len(output_dims) == len(
kernel_sizes
), "input_dims, output_dims, and kernel_sizes should all have the same length"
assert (
input_dims[1:] == output_dims[:-1]
), "output_dims should match input_dims offset by one"
for in_channels, out_channels, kernel_size in zip(
input_dims,
output_dims,
kernel_sizes,
):
padding_size = kernel_size // 2
conv = nn.Conv2d(
in_channels, out_channels, kernel_size, padding=padding_size
)
max_pool2d = nn.MaxPool2d(2, stride=2)
batch_norm_2d = nn.BatchNorm2d(out_channels)
conv_layers.append(
nn.Sequential(conv, nn.LeakyReLU(), max_pool2d, batch_norm_2d)
)
conv_layers.append(nn.Flatten())
self.cnn = nn.Sequential(*conv_layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.cnn(x)
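# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). Each layer halves the spatial
# resolution with MaxPool2d(2), so for n layers the flattened feature size is
# output_dims[-1] * (H // 2**n) * (W // 2**n).
if __name__ == "__main__":
    encoder = CNNEncoder(input_dims=[3, 8], output_dims=[8, 4], kernel_sizes=[3, 3])
    images = torch.randn(2, 3, 16, 16)
    features = encoder(images)
    print(features.shape)  # expected: torch.Size([2, 64]) -> 4 channels * 4 * 4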
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/cnn_lstm/cnn_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from examples.cnn_lstm.cnn_lstm import cnn_lstm_classifier
from tests.test_utils import assert_expected, set_rng_seed
@pytest.fixture(autouse=True)
def random():
set_rng_seed(1234)
class TestCNNLSTMModule:
@pytest.fixture
def classifier_in_dim(self):
return 450
@pytest.fixture
def num_classes(self):
return 32
@pytest.fixture
def cnn_lstm(self, classifier_in_dim, num_classes):
return cnn_lstm_classifier(
text_vocab_size=80,
text_embedding_dim=20,
cnn_input_dims=[3, 64, 128, 128, 64, 64],
cnn_output_dims=[64, 128, 128, 64, 64, 10],
cnn_kernel_sizes=[7, 5, 5, 5, 5, 1],
lstm_input_size=20,
lstm_hidden_dim=50,
lstm_bidirectional=True,
lstm_batch_first=True,
classifier_in_dim=classifier_in_dim,
num_classes=num_classes,
)
@pytest.fixture
def text(self):
return torch.randint(1, 79, (10,), dtype=torch.long).unsqueeze(0)
@pytest.fixture
def image(self):
return torch.randn(3, 320, 480).unsqueeze(0)
def test_forward(self, text, image, cnn_lstm):
assert isinstance(cnn_lstm, torch.nn.Module)
scores = cnn_lstm({"image": image, "text": text})
assert_expected(scores.size(), torch.Size((1, 32)))
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/cnn_lstm/tests/test_cnn_lstm.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from examples.cnn_lstm.cnn_encoder import CNNEncoder
from tests.test_utils import assert_expected, set_rng_seed
from torch import nn, Tensor
@pytest.fixture(autouse=True)
def random():
set_rng_seed(0)
class TestCNNEncoder:
@pytest.fixture()
def input(self):
return Tensor([1, 2, 3, 4, 5, 6, 1, 3, 5, 2, 4, 6]).reshape(2, 1, 2, 3)
@pytest.fixture()
def input_dims(self):
return [0, 1, 2, 3]
@pytest.fixture()
def output_dims(self):
return [1, 2, 4, 5]
@pytest.fixture()
def kernel_sizes(self):
return [6, 7, 8, 9]
@pytest.fixture()
def single_layer_input(self):
return torch.rand(3, 3, 2, 2)
@pytest.fixture()
def single_layer_cnn_encoder(self):
return CNNEncoder([3], [3], [5])
@pytest.fixture()
def multiple_layer_input(self):
return torch.rand(3, 3, 8, 8)
@pytest.fixture()
def multiple_layer_cnn_encoder(self):
return CNNEncoder([3, 2, 1], [2, 1, 2], [3, 5, 7])
@pytest.fixture()
def small_cnn_encoder(self):
return CNNEncoder([1], [1], [2])
def test_invalid_arg_lengths(self, input_dims, output_dims, kernel_sizes):
with pytest.raises(AssertionError):
CNNEncoder(
input_dims[1:],
output_dims,
kernel_sizes,
)
def test_invalid_output_dims(self, input_dims, output_dims, kernel_sizes):
with pytest.raises(AssertionError):
CNNEncoder(
input_dims,
output_dims,
kernel_sizes,
)
def test_single_layer(self, single_layer_input, single_layer_cnn_encoder):
actual = single_layer_cnn_encoder(single_layer_input)
expected = Tensor(
[
[-0.452341, 0.680854, -0.557894],
[-0.924794, 0.729902, -0.836271],
[1.377135, -1.410758, 1.394166],
]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_multiple_layer(self, multiple_layer_input, multiple_layer_cnn_encoder):
actual = multiple_layer_cnn_encoder(multiple_layer_input)
expected = Tensor(
[[-0.482730, -0.253406], [1.391524, 1.298026], [-0.908794, -1.044622]]
)
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_fixed_weight_and_bias(self, input, small_cnn_encoder):
small_cnn_encoder.cnn[0][0].bias = nn.Parameter(Tensor([0.5]))
small_cnn_encoder.cnn[0][0].weight = nn.Parameter(
Tensor([[1.0, 2.0], [3.0, 4.0]]).unsqueeze(0).unsqueeze(0)
)
actual = small_cnn_encoder(input)
expected = Tensor([[-0.434959, 0.807781], [-1.429150, 1.056329]])
assert_expected(actual, expected, rtol=0, atol=1e-4)
def test_scripting(self, input, small_cnn_encoder):
small_cnn_encoder.cnn[0][0].bias = nn.Parameter(Tensor([0.5]))
small_cnn_encoder.cnn[0][0].weight = nn.Parameter(
Tensor([[1.0, 2.0], [3.0, 4.0]]).unsqueeze(0).unsqueeze(0)
)
scripted_encoder = torch.jit.script(small_cnn_encoder)
actual = scripted_encoder(input)
expected = Tensor([[-0.434959, 0.807781], [-1.429150, 1.056329]])
assert_expected(actual, expected, rtol=0, atol=1e-4)
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/cnn_lstm/tests/test_cnn_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from examples.cnn_lstm.lstm_encoder import LSTMEncoder
from tests.test_utils import assert_expected
class TestLSTMEncoder:
@pytest.fixture
def input(self):
return torch.randint(1, 79, (10,), dtype=torch.long).unsqueeze(0)
@pytest.fixture
def lstm_encoder(self):
return LSTMEncoder(
vocab_size=80,
embedding_dim=20,
input_size=20,
hidden_size=50,
bidirectional=True,
batch_first=True,
)
def test_lstm_encoder(self, input, lstm_encoder):
out = lstm_encoder(input)
assert_expected(out.size(), torch.Size([1, 100]))
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/cnn_lstm/tests/test_lstm_encoder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple
from omegaconf import MISSING
def _default_split_key_mapping():
return {x: x for x in ["train", "validation", "test"]}
@dataclass
class DatasetInfo:
key: str = MISSING
@dataclass
class HFDatasetInfo(DatasetInfo):
key: str = MISSING
subset: Optional[str] = None
remove_columns: Optional[List[str]] = None
# Any is actually list of pairs for renaming the column A to B
# limited to Any because of OmegaConf limitations
rename_columns: Optional[List[Any]] = None
# TODO: Look if we can add text column option and encode transform settings here.
split_key_mapping: Optional[Dict[str, str]] = field(
default_factory=_default_split_key_mapping
)
extra_kwargs: Dict[str, Any] = field(default_factory=dict)
@dataclass
class TorchVisionDatasetInfo(DatasetInfo):
key: str = MISSING
train_split: str = "train"
val_split: str = "val"
has_val: bool = True
test_split: str = "test"
@dataclass
class TrainingSingleDatasetInfo:
train: List[DatasetInfo] = field(default_factory=lambda: [HFDatasetInfo()])
val: Optional[List[DatasetInfo]] = None
batch_size: Optional[int] = None
num_workers: Optional[int] = None
allow_uneven_batches: bool = False
datamodule_extra_kwargs: Dict[str, Any] = field(default_factory=dict)
@dataclass
class TrainingDatasetsInfo:
selected: List[str] = field(default_factory=lambda: ["image", "text", "vl"])
image: Optional[TrainingSingleDatasetInfo] = None
text: Optional[TrainingSingleDatasetInfo] = None
vl: Optional[TrainingSingleDatasetInfo] = None
num_classes: int = MISSING
@dataclass
class TrainingArguments:
# Any lightning args to be pushed here
lightning: Dict[str, Any] = field(default_factory=dict)
lightning_checkpoint: Optional[Dict[str, Any]] = None
lightning_load_from_checkpoint: Optional[str] = None
seed: int = -1
batch_size: int = 8
num_workers: int = 4
learning_rate: float = 0.0002
adam_eps: float = 1e-08
adam_weight_decay: float = 0.01
adam_betas: Tuple[float, float] = field(default_factory=lambda: (0.9, 0.999))
warmup_steps: int = 2000
@dataclass
class ModelArguments:
pretrained: bool = False
@dataclass
class FLAVAArguments:
datasets: TrainingDatasetsInfo = field(default_factory=TrainingDatasetsInfo)
training: TrainingArguments = field(default_factory=TrainingArguments)
model: ModelArguments = field(default_factory=ModelArguments)
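# ---------------------------------------------------------------------------
# Minimal sketch (illustrative only) of one way these dataclasses can be
# consumed: OmegaConf turns them into a typed structured config that can be
# merged with overrides. The training scripts themselves build the config
# from a YAML file via flava.utils.build_config; the dotlist values below
# are arbitrary placeholders.
if __name__ == "__main__":
    from omegaconf import OmegaConf

    schema = OmegaConf.structured(FLAVAArguments)
    overrides = OmegaConf.from_dotlist(
        ["training.batch_size=16", "datasets.num_classes=2"]
    )
    config = OmegaConf.merge(schema, overrides)
    print(config.training.batch_size, config.datasets.num_classes)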
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/definitions.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Tuple
import torch
from pytorch_lightning import LightningModule
from torchmetrics import Accuracy
from torchmultimodal.models.flava.model import (
flava_model_for_classification,
flava_model_for_pretraining,
)
from transformers.optimization import get_cosine_schedule_with_warmup
def get_optimizers_for_lightning(
model: torch.nn.Module,
learning_rate: float,
adam_eps: float,
adam_weight_decay: float,
adam_betas: Tuple[float, float],
warmup_steps: int,
max_steps: int,
):
optimizer = torch.optim.AdamW(
model.parameters(),
lr=learning_rate,
betas=adam_betas,
eps=adam_eps,
weight_decay=adam_weight_decay,
)
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=max_steps,
)
return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
class FLAVAPreTrainingLightningModule(LightningModule):
def __init__(
self,
learning_rate: float = 0.0002,
adam_eps: float = 1.0e-08,
adam_weight_decay: float = 0.01,
adam_betas: Tuple[float, float] = (0.9, 0.999),
warmup_steps: int = 2000,
max_steps: int = 450000,
**flava_pretraining_kwargs: Any,
):
super().__init__()
self.model = flava_model_for_pretraining(**flava_pretraining_kwargs)
self.learning_rate = learning_rate
self.adam_eps = adam_eps
self.adam_betas = adam_betas
self.adam_weight_decay = adam_weight_decay
self.warmup_steps = warmup_steps
self.max_steps = max_steps
def training_step(self, batch, batch_idx):
output = self._step(batch, batch_idx)
losses = output.losses
total_loss = 0
for key in losses:
if losses[key] is not None:
total_loss += losses[key]
self.log(f"train/losses/{key}", losses[key], prog_bar=True, logger=True)
return total_loss
def validation_step(self, batch, batch_idx):
output = self._step(batch, batch_idx)
losses = output.losses
total_loss = 0
for key in losses:
if losses[key] is not None:
total_loss += losses[key]
self.log(
f"validation/losses/{key}", losses[key], prog_bar=True, logger=True
)
return total_loss
def _step(self, batch, batch_idx):
if "image" in batch and ("text" in batch or "text_masked" in batch):
required_embedding = "mm"
elif "image" in batch:
required_embedding = "image"
elif "text" in batch or "text_masked" in batch:
required_embedding = "text"
else:
raise RuntimeError("Batch needs to have either or both 'image' and 'text'.")
output = self.model(
image=batch.get("image", None),
image_for_codebook=batch.get("image_for_codebook", None),
image_patches_mask=batch.get("image_patches_mask", None),
text=batch.get("text", None),
text_masked=batch.get("text_masked", None),
mlm_labels=batch.get("mlm_labels", None),
itm_labels=batch.get("itm_labels", None),
required_embedding=required_embedding,
)
return output
def configure_optimizers(self):
return get_optimizers_for_lightning(
self.model,
self.learning_rate,
self.adam_eps,
self.adam_weight_decay,
self.adam_betas,
self.warmup_steps,
self.max_steps,
)
class FLAVAClassificationLightningModule(LightningModule):
def __init__(
self,
num_classes: int,
learning_rate: float = 0.0002,
adam_eps: float = 1.0e-08,
adam_weight_decay: float = 0.01,
adam_betas: Tuple[float, float] = (0.9, 0.999),
warmup_steps: int = 2000,
max_steps: int = 450000,
**flava_classification_kwargs: Any,
):
super().__init__()
self.model = flava_model_for_classification(
num_classes, **flava_classification_kwargs
)
self.learning_rate = learning_rate
self.adam_eps = adam_eps
self.adam_weight_decay = adam_weight_decay
self.warmup_steps = warmup_steps
self.max_steps = max_steps
self.adam_betas = adam_betas
self.metrics = Accuracy()
def training_step(self, batch, batch_idx):
output, accuracy = self._step(batch, batch_idx)
self.log("train/losses/classification", output.loss, prog_bar=True, logger=True)
self.log(
"train/accuracy/classification",
accuracy,
prog_bar=True,
logger=True,
sync_dist=True,
)
return output.loss
def validation_step(self, batch, batch_idx):
output, accuracy = self._step(batch, batch_idx)
self.log(
"validation/losses/classification", output.loss, prog_bar=True, logger=True
)
self.log(
"validation/accuracy/classification",
accuracy,
prog_bar=True,
logger=True,
sync_dist=True,
)
return output.loss
def _step(self, batch, batch_idx):
if "image" in batch and ("text" in batch or "text_masked" in batch):
required_embedding = "mm"
elif "image" in batch:
required_embedding = "image"
elif "text" in batch or "text_masked" in batch:
required_embedding = "text"
else:
raise RuntimeError("Batch needs to have either or both 'image' and 'text'.")
labels = batch["labels"]
output = self.model(
image=batch.get("image", None),
text=batch.get("text", None),
required_embedding=required_embedding,
labels=labels,
)
accuracy = self.metrics(output.logits, labels)
return output, accuracy
def configure_optimizers(self):
return get_optimizers_for_lightning(
self.model,
self.learning_rate,
self.adam_eps,
self.adam_weight_decay,
self.adam_betas,
self.warmup_steps,
self.max_steps,
)
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import torch
from flava.data.transforms import (
default_image_pretraining_transforms,
default_text_transform,
)
from torch import nn
from torch.utils.data import DataLoader
from torchmultimodal.models.flava.model import flava_model
from torchvision.datasets import CocoCaptions
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def compute_recall(similarity_scores: torch.Tensor, k: int = 5):
dataset_size = similarity_scores.size(0)
targets = torch.arange(dataset_size).view(dataset_size, -1)
_, topk_idx = torch.topk(similarity_scores, k)
recall = targets.eq(topk_idx).sum()
recall = recall / dataset_size
return recall
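# Worked example (illustrative): with 3 aligned image/text pairs and
#   similarity_scores = [[0.9, 0.1, 0.0],
#                        [0.2, 0.1, 0.7],
#                        [0.1, 0.8, 0.3]]
# the top-1 index per row is [0, 2, 1]; only row 0 recovers its own index,
# so recall@1 = 1/3. With k=2 the top-2 sets are {0, 1}, {2, 0}, {1, 2},
# and rows 0 and 2 recover their own index, so recall@2 = 2/3.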
def transform(image, target):
_, image_transform = default_image_pretraining_transforms()
transformed_image = image_transform(image)
# Take the first caption for now
transformed_text = default_text_transform()(target[0])
return transformed_image, transformed_text
def collator(batch):
images = torch.stack([x[0]["image"] for x in batch], dim=0)
texts = torch.cat([torch.LongTensor(x[1]["input_ids"]) for x in batch], dim=0)
return images, texts
def setup_args():
parser = argparse.ArgumentParser()
parser.add_argument("--data_root", help="Path to data root directory")
parser.add_argument("--annotations", help="Path to annotation file")
parser.add_argument("--batch_size", default=16)
args = parser.parse_args()
return args
def main():
args = setup_args()
dataset = CocoCaptions(
root=args.data_root, annFile=args.annotations, transforms=transform
)
flava = flava_model(pretrained=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info(f"Using device: {device}")
flava = flava.to(device)
flava.eval()
text_embeds = []
image_embeds = []
dataloader = DataLoader(dataset, batch_size=args.batch_size, collate_fn=collator)
for batch_idx, batch in enumerate(dataloader):
logger.info(f"Batch id {batch_idx}")
image, text = batch
_, text_emb = flava.encode_text(text.to(device), projection=True)
_, image_emb = flava.encode_image(image.to(device), projection=True)
text_embeds.append(text_emb.detach().cpu())
image_embeds.append(image_emb.detach().cpu())
image_embeds = torch.cat(image_embeds, 0)
text_embeds = torch.cat(text_embeds, 0)
image_embeds = nn.functional.normalize(image_embeds, dim=-1)
text_embeds = nn.functional.normalize(text_embeds, dim=-1)
similarity_scores = image_embeds @ text_embeds.t()
similarity_scores_t = similarity_scores.t()
image_to_text_r1 = compute_recall(similarity_scores, k=1)
image_to_text_r5 = compute_recall(similarity_scores, k=5)
text_to_image_r1 = compute_recall(similarity_scores_t, k=1)
text_to_image_r5 = compute_recall(similarity_scores_t, k=5)
logger.info(f"image_to_text_recall@1 {image_to_text_r1}")
logger.info(f"image_to_text_recall@5 {image_to_text_r5}")
logger.info(f"text_to_image_recall@1 {text_to_image_r1}")
logger.info(f"text_to_image_recall@5 {text_to_image_r5}")
if __name__ == "__main__":
main()
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/coco_zero_shot.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from flava.definitions import (
FLAVAArguments,
TrainingArguments,
TrainingSingleDatasetInfo,
)
from hydra.utils import instantiate
from omegaconf import OmegaConf
def build_datamodule_kwargs(
dm_config: TrainingSingleDatasetInfo, training_config: TrainingArguments
):
kwargs = {
"train_infos": dm_config.train,
"val_infos": dm_config.val,
"batch_size": dm_config.batch_size or training_config.batch_size,
"num_workers": dm_config.num_workers or training_config.num_workers,
"allow_uneven_batches": dm_config.allow_uneven_batches,
}
kwargs.update(dm_config.datamodule_extra_kwargs)
return kwargs
def build_config():
cli_conf = OmegaConf.from_cli()
if "config" not in cli_conf:
raise ValueError(
"Please pass 'config' to specify configuration yaml file for running FLAVA"
)
yaml_conf = OmegaConf.load(cli_conf.config)
conf = instantiate(yaml_conf)
cli_conf.pop("config")
config: FLAVAArguments = OmegaConf.merge(conf, cli_conf)
assert (
"max_steps" in config.training.lightning
), "lightning config must specify 'max_steps'"
return config
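# Example invocation (illustrative; the config path is a placeholder):
#   python -m flava.train config=path/to/pretrain.yaml \
#       training.batch_size=16 training.lightning.max_steps=1000
# Every key other than `config` is parsed by OmegaConf.from_cli() and
# overrides the corresponding value loaded from the YAML file.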
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from common.data import MultiDataModule
from flava.callbacks.multimodal_eval import MultimodalEvalCallback
from flava.data import ImageDataModule, MLMDataModule, VLDataModule
from flava.definitions import FLAVAArguments
from flava.model import FLAVAPreTrainingLightningModule
from flava.utils import build_config, build_datamodule_kwargs
from omegaconf import OmegaConf
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
def main():
config: FLAVAArguments = build_config()
if config.training.seed != -1:
seed_everything(config.training.seed, workers=True)
datamodules = []
# also needed for the imagenet eval callback
imagenet_datamodule = ImageDataModule(
**build_datamodule_kwargs(config.datasets.image, config.training)
)
if "image" in config.datasets.selected:
datamodules.append(imagenet_datamodule)
if "text" in config.datasets.selected:
mlm_datamodule = MLMDataModule(
**build_datamodule_kwargs(config.datasets.text, config.training)
)
datamodules.append(mlm_datamodule)
if "vl" in config.datasets.selected:
vl_datamodule = VLDataModule(
**build_datamodule_kwargs(config.datasets.vl, config.training)
)
datamodules.append(vl_datamodule)
datamodule = MultiDataModule(datamodules)
datamodule.setup("fit")
model = FLAVAPreTrainingLightningModule(
learning_rate=config.training.learning_rate,
adam_eps=config.training.adam_eps,
adam_weight_decay=config.training.adam_weight_decay,
adam_betas=config.training.adam_betas,
warmup_steps=config.training.warmup_steps,
max_steps=config.training.lightning.max_steps,
**config.model,
)
callbacks = [
LearningRateMonitor(logging_interval="step"),
MultimodalEvalCallback(imagenet_datamodule=imagenet_datamodule),
]
if config.training.lightning_checkpoint is not None:
callbacks.append(
ModelCheckpoint(
**OmegaConf.to_container(config.training.lightning_checkpoint)
)
)
trainer = Trainer(
**OmegaConf.to_container(config.training.lightning),
callbacks=callbacks,
)
ckpt_path = config.training.lightning_load_from_checkpoint
trainer.fit(model, datamodule=datamodule, ckpt_path=ckpt_path)
trainer.validate(model, datamodule=datamodule)
if __name__ == "__main__":
main()
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from flava.data import TextDataModule, TorchVisionDataModule
from flava.data.datamodules import VLDataModule
from flava.definitions import FLAVAArguments
from flava.model import FLAVAClassificationLightningModule
from flava.utils import build_config, build_datamodule_kwargs
from omegaconf import OmegaConf
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
AVAIL_GPUS = 1
SEED = -1
NUM_CLASSES = 2
NUM_WORKERS = 4
MAX_STEPS = 24000
BATCH_SIZE = 32
def main():
config: FLAVAArguments = build_config()
if config.training.seed != -1:
seed_everything(config.training.seed, workers=True)
assert len(config.datasets.selected) == 1
if "image" in config.datasets.selected:
datamodule = TorchVisionDataModule(
**build_datamodule_kwargs(config.datasets.image, config.training)
)
elif "text":
datamodule = TextDataModule(
**build_datamodule_kwargs(config.datasets.text, config.training)
)
else:
datamodule = VLDataModule(
**build_datamodule_kwargs(config.datasets.vl, config.training),
finetuning=True,
)
datamodule.setup("fit")
model = FLAVAClassificationLightningModule(
num_classes=config.datasets.num_classes,
learning_rate=config.training.learning_rate,
adam_eps=config.training.adam_eps,
adam_weight_decay=config.training.adam_weight_decay,
adam_betas=config.training.adam_betas,
warmup_steps=config.training.warmup_steps,
max_steps=config.training.lightning.max_steps,
**config.model,
)
callbacks = [
LearningRateMonitor(logging_interval="step"),
]
if config.training.lightning_checkpoint is not None:
callbacks.append(
ModelCheckpoint(
**OmegaConf.to_container(config.training.lightning_checkpoint)
)
)
trainer = Trainer(
**OmegaConf.to_container(config.training.lightning), callbacks=callbacks
)
ckpt_path = config.training.lightning_load_from_checkpoint
trainer.fit(model, datamodule=datamodule, ckpt_path=ckpt_path)
trainer.validate(model, datamodule=datamodule)
if __name__ == "__main__":
main()
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
from torchmultimodal.models.flava.model import flava_model_for_pretraining
KEY_REPLACEMENTS = {
"image_encoder.module": "image_encoder",
"text_encoder.module": "text_encoder",
"mm_encoder.module": "mm_encoder",
"mm_encoder.encoder.cls_token": "mm_encoder.cls_token",
"mm_image_projection": "image_to_mm_projection",
"mm_text_projection": "text_to_mm_projection",
"model.heads.cmd.mim_head": "loss.mmm_loss.mim",
"model.heads.cmd.mlm_head": "loss.mmm_loss.mlm",
"model.heads.fairseq_mlm": "loss.mlm_loss",
"model.heads.imagenet.mim_head": "loss.mim_loss",
"cls.predictions.transform": "cls",
"cls.predictions": "cls",
"cls.LayerNorm": "cls.layer_norm",
"model.text_projection": "loss.contrastive_loss.text_projection",
"model.image_projection": "loss.contrastive_loss.image_projection",
"model.heads.cmd.clip_head.logit_scale": "loss.contrastive_loss.logit_scale",
"model.heads.cmd.itm_head": "loss.itm_loss",
"intermediate.dense": "intermediate",
"output.dense": "output",
}
def convert_weights(args):
ckpt = torch.load(args.ckpt_file, map_location="cpu")
flava = flava_model_for_pretraining()
model = ckpt["model"]
for key in list(model.keys()):
original = key
for option, replacement in KEY_REPLACEMENTS.items():
key = key.replace(option, replacement)
model[key] = model.pop(original)
if args.add_codebook:
# Since the codebook is not trained during FLAVA pretraining,
# we can reuse the pretrained codebook from the freshly
# initialized FLAVA model.
model.update(
{
f"image_codebook.{key}": value
for key, value in flava.image_codebook.state_dict().items()
}
)
flava.load_state_dict(model)
# Let's save the model now.
torch.save(flava.state_dict(), args.save_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Convert weights")
parser.add_argument("ckpt_file", type=str)
parser.add_argument("save_file", type=str)
parser.add_argument("--add_codebook", action="store_true")
args = parser.parse_args()
convert_weights(args)
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/tools/convert_weights.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/native/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Tuple
import torch
from torch import nn
from torchmultimodal.models.flava.model import flava_model_for_pretraining
from transformers.optimization import get_cosine_schedule_with_warmup
def get_optimizer(
model: torch.nn.Module,
learning_rate: float = 0.0002,
adam_eps: float = 1.0e-08,
adam_weight_decay: float = 0.01,
adam_betas: Tuple[float, float] = (0.9, 0.999),
warmup_steps: int = 2000,
max_steps: int = 450000,
):
optimizer = torch.optim.AdamW(
model.parameters(),
lr=learning_rate,
betas=adam_betas,
eps=adam_eps,
weight_decay=adam_weight_decay,
)
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=max_steps,
)
return optimizer, scheduler
class FLAVAPreTrainModule(nn.Module):
def __init__(
self,
use_bf16: bool = True,
**flava_pretraining_kwargs: Any,
):
super().__init__()
self.model = flava_model_for_pretraining(**flava_pretraining_kwargs)
self.use_bf16 = use_bf16
def forward(self, batch, action=None):
# super hacky
if action == "encode_text":
return self.model.encode_text(batch)
elif action == "encode_image":
return self.model.encode_image(batch)
if "image" in batch and ("text" in batch or "text_masked" in batch):
required_embedding = "mm"
elif "image" in batch:
required_embedding = "image"
elif "text" in batch or "text_masked" in batch:
required_embedding = "text"
else:
raise RuntimeError("Batch needs to have either or both 'image' and 'text'.")
output = self.model(
image=batch.get("image"),
image_for_codebook=batch.get("image_for_codebook"),
image_patches_mask=batch.get("image_patches_mask"),
text=batch.get("text"),
text_masked=batch.get("text_masked"),
mlm_labels=batch.get("mlm_labels"),
itm_labels=batch.get("itm_labels"),
required_embedding=required_embedding,
)
return output
def encode_text(self, *args, **kwargs):
return self.model.encode_text(*args, **kwargs)
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/native/model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from typing import Any
import torch
from flava.data.imagenet_zeroshot_data import (
imagenet_classnames,
openai_imagenet_template,
)
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from torch import distributed as dist
from tqdm import tqdm
# optional syntax-highlighting for console output
try:
from rich.console import Console
c = Console(force_terminal=True)
print = c.log
except ImportError:
pass
def build_config() -> DictConfig:
cli_conf = OmegaConf.from_cli()
yaml_conf = OmegaConf.load(cli_conf.config)
conf = instantiate(yaml_conf)
conf = OmegaConf.merge(conf, cli_conf)
return conf
# TODO replace with tlc.copy_data_to_device
def move_to_device(obj: Any, device: torch.device) -> Any:
if isinstance(obj, dict):
d = {}
for k, v in obj.items():
d[k] = move_to_device(v, device)
return d
if isinstance(obj, list):
l = []
for v in obj:
l.append(move_to_device(v, device))
return l
return obj.to(device)
def get_model_size_gb(model: torch.nn.Module) -> float:
return sum(p.numel() * p.element_size() for p in model.parameters()) / (1024**3)
def get_model_parameters(model: torch.nn.Module) -> int:
return sum(p.numel() for p in model.parameters())
def set_seed(seed: int) -> None:
torch.manual_seed(seed)
random.seed(seed)
def setup_distributed_device() -> torch.device:
if not torch.cuda.is_available() or not dist.is_available():
return torch.device("cpu")
dist.init_process_group("nccl")
local_rank = int(os.environ["LOCAL_RANK"])
print("local rank", local_rank)
torch.cuda.set_device(local_rank)
return torch.device(f"cuda:{local_rank}")
def print0(*args, **kwargs) -> None:
if not dist.is_initialized() or dist.get_rank() == 0:
print(*args, **kwargs)
def enable_tf32() -> None:
torch.backends.cudnn.allow_tf32 = True
torch.backends.cuda.matmul.allow_tf32 = True
def rank0_only(func):
def wrapper(*args, **kwargs):
if not dist.is_initialized() or dist.get_rank() == 0:
return func(*args, **kwargs)
return wrapper
# zero shot classifier functions
def _zero_shot_classifier(model, device, text_transform, *args, **kwargs):
zeroshot_weights = []
for classname in tqdm(imagenet_classnames):
texts = text_transform(
[template(classname) for template in openai_imagenet_template]
)["input_ids"]
texts = texts.to(device)
class_embeddings = model(texts, action="encode_text")
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device)
return zeroshot_weights
def _accuracy(output, target, topk=(1,)):
pred = output.topk(max(topk), 1, True, True)[1].t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
return [
float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy())
for k in topk
]
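# Worked example (illustrative): for output logits [[0.1, 0.7, 0.2]] and
# target [2], the top-1 prediction is class 1 (incorrect) while the top-2
# set {1, 2} contains the target, so _accuracy(output, target, topk=(1, 2))
# returns [0.0, 1.0]. These are raw correct counts; the caller divides by
# the number of samples to obtain accuracies.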
def run_imagenet_zero_shot(model, dataloader, device, text_transform, *args, **kwargs):
print0("Starting ImageNet Zero-Shot Eval")
print0("Building classifier")
classifier = _zero_shot_classifier(model, device, text_transform)
print0("Classifier built")
top1, top5, n = 0.0, 0.0, 0.0
for i, sample in tqdm(enumerate(dataloader)):
images = sample["image"]
target = sample["label"]
images = images.to(device)
target = target.to(device)
# predict
# if hasattr(model, "module"):
# image_features = model.module.encode_image({"image": images})
# else:
image_features = model(images, action="encode_image")
image_features /= image_features.norm(dim=-1, keepdim=True)
logits = 100.0 * image_features @ classifier
# measure accuracy
acc1, acc5 = _accuracy(logits, target, topk=(1, 5))
top1 += acc1
top5 += acc5
n += images.size(0)
if i == 5:
break
top1 = top1 / n
top5 = top5 / n
results = {}
results["imagenet-zeroshot-val-top1"] = top1
results["imagenet-zeroshot-val-top5"] = top5
print0("results: ", results)
return results
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/native/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# example command to train:
# `torchrun --nproc_per_node=8 -m flava.native.train config=flava/native/configs/pretrain_debug.yaml`
import time
from functools import partial
from typing import Any, Dict, Tuple, Union
import datasets
import numpy as np
import torch
import torch.distributed as dist
from common.data import MultiDataModule
from flava.definitions import FLAVAArguments
from flava.native.data import (
default_text_transform,
ImageDataModule,
MLMDataModule,
VL_MAX_LENGTH_DEFAULT,
VLDataModule,
)
from flava.native.model import FLAVAPreTrainModule, get_optimizer
from flava.native.utils import (
build_config,
enable_tf32,
get_model_parameters,
get_model_size_gb,
move_to_device,
print0,
run_imagenet_zero_shot,
set_seed,
setup_distributed_device,
)
from flava.utils import build_datamodule_kwargs
from omegaconf import DictConfig, OmegaConf
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
checkpoint_wrapper,
CheckpointImpl,
)
from torch.distributed.elastic.multiprocessing.errors import record
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from torchmultimodal.models.flava.image_encoder import ImageTransformer
from torchmultimodal.models.flava.text_encoder import BERTTextEncoder
from torchmultimodal.models.flava.transformer import FLAVATransformerWithoutEmbeddings
from torchmultimodal.modules.layers.transformer import TransformerEncoderLayer
from torchmultimodal.modules.losses.flava import FLAVAPretrainingLossOutput
def get_datamodules(config: FLAVAArguments) -> Tuple[MultiDataModule, ImageDataModule]:
datamodules = []
# also needed for the imagenet eval callback
imagenet_datamodule = ImageDataModule(
**build_datamodule_kwargs(config.datasets.image, config.training)
)
for dataset in config.datasets.selected:
if dataset == "image":
datamodules.append(imagenet_datamodule)
elif dataset == "text":
datamodules.append(
MLMDataModule(
**build_datamodule_kwargs(config.datasets.text, config.training)
)
)
elif dataset == "vl":
datamodules.append(
VLDataModule(
**build_datamodule_kwargs(config.datasets.vl, config.training)
)
)
else:
raise ValueError(f"unknown dataset: {dataset}")
return MultiDataModule(datamodules), imagenet_datamodule
@record
class Trainer:
def __init__(self, config: DictConfig):
if config.training.seed != -1:
set_seed(config.training.seed)
self.device: torch.device = setup_distributed_device()
self.config: DictConfig = config
self.rank: int = dist.get_rank()
self._logger: SummaryWriter = SummaryWriter(
f"logs/{config.training.strategy}/{int(time.time())}"
)
self.steps: int = -1
self.epochs: int = -1
multi_module, image_module = get_datamodules(config)
self.datamodule: MultiDataModule = multi_module
self.datamodule.setup("fit")
self.imagenet_val_dataloader = image_module.val_dataloader()
self.imagenet_val_text_transform = default_text_transform(
max_text_length=VL_MAX_LENGTH_DEFAULT
)
self.half_dtype = (
torch.bfloat16
if config.training.half_precision_format == "bfloat16"
else torch.float16
)
self.scaler = ShardedGradScaler() if config.training.enable_amp else None
def log(
self,
name: str,
value: Union[torch.Tensor, float, int],
log_rank_0: bool = True,
always_log: bool = False,
):
if log_rank_0 and self.rank != 0:
return
if always_log or self.steps % self.config.training.log_interval == 0:
self._logger.add_scalar(name, value, self.steps)
def create_model(self) -> torch.nn.Module:
model_config = self.config.get("model", {})
print0(f"using model config: {model_config}")
model = FLAVAPreTrainModule(**model_config)
strategy = self.config.training.strategy
print0(
f"before {strategy} model parameters: {get_model_parameters(model):,}, "
f"size: {get_model_size_gb(model):.3} GB"
)
if self.config.training.activation_checkpointing:
check_fn = lambda submodule: isinstance(submodule, TransformerEncoderLayer)
checkpoint_impl = CheckpointImpl.REENTRANT
# DDP gradient hooks have compatibility issues with REENTRANT autograd
if strategy == "ddp":
checkpoint_impl = CheckpointImpl.NO_REENTRANT
checkpoint_wrapper_fn = partial(
checkpoint_wrapper,
offload_to_cpu=False,
checkpoint_impl=checkpoint_impl,
)
apply_activation_checkpointing(
model,
checkpoint_wrapper_fn=checkpoint_wrapper_fn,
check_fn=check_fn,
)
if strategy == "ddp":
# TODO do we have to do this in FSDP too? see https://github.com/pytorch/pytorch/issues/75478
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = model.to(self.device)
print0(
f"after moving to cuda: {torch.cuda.memory_allocated()/1024**3:.3} GB"
)
model = DDP(
model,
device_ids=[self.rank],
find_unused_parameters=True,
gradient_as_bucket_view=True,
)
print0(f"after DDP: {torch.cuda.memory_allocated()/1024**3:.3} GB")
elif strategy == "fsdp":
mp = None
if self.config.training.enable_half_reduce_in_fsdp:
mp = MixedPrecision(
# param_dtype=self.half_dtype, not working
reduce_dtype=self.half_dtype,
# buffer_dtype=self.half_dtype,
)
model = FSDP(
model,
mixed_precision=mp,
device_id=self.device,
auto_wrap_policy=partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
TransformerEncoderLayer,
ImageTransformer,
BERTTextEncoder,
FLAVATransformerWithoutEmbeddings,
},
),
limit_all_gathers=True,
)
print0(f"after FSDP {torch.cuda.memory_allocated()/1024**3:.3} GB")
else:
raise ValueError(f"unknown strategy: {strategy}")
print0(
f"after {strategy} model parameters: {get_model_parameters(model):,}, "
f"size: {get_model_size_gb(model):.3} GB"
)
return model
def calculate_loss(
self, output: FLAVAPretrainingLossOutput, validation=False
) -> torch.Tensor:
losses = output.losses
total_loss = 0
for key in losses:
if losses[key] is not None:
total_loss += losses[key]
loss_reduce = losses[key].detach()
dist.reduce(loss_reduce, dst=0)
if validation:
mode = "validation"
else:
mode = "train"
self.log(
f"{mode}/losses/{key}",
loss_reduce.item() / dist.get_world_size(),
)
return total_loss
def preprocess_data(self, data: Dict[str, Any]):
data = self.datamodule.on_before_batch_transfer(data, None)
data = move_to_device(data, self.device)
return self.datamodule.on_after_batch_transfer(data, None)
def _log_iteration_times(self, iteration_times):
profile_warmup_steps = self.config.get("profile_warmup_steps", 100)
start_idx = (
profile_warmup_steps
if profile_warmup_steps < self.config.training.max_steps
else 0
)
iteration_times = iteration_times[start_idx:]
avg_it_time = np.mean(iteration_times)
avg_throughput = (
self.config.training.batch_size * dist.get_world_size()
) / avg_it_time
print0(f"Average over {len(iteration_times)} steps")
print0(f"Average iteration time {round(avg_it_time,4)}")
print0(f"Average throughput {round(avg_throughput,4)}")
def train(self) -> None:
print0(OmegaConf.to_container(self.config.training))
self.model = self.create_model()
model = self.model
optimizer, scheduler = get_optimizer(
model,
**self.config.training.optimizer,
)
iteration_times = []
while True:
t0 = time.time()
self.epochs += 1
dataloader = self.datamodule.train_dataloader()
dataloader.set_epoch(self.epochs)
for i, data in enumerate(dataloader):
torch.cuda.reset_peak_memory_stats()
self.steps += 1
if self.config.training.max_steps < self.steps:
if self.rank == 0:
self._log_iteration_times(iteration_times)
print0("Max steps reached, exiting")
return
model.train()
data = self.preprocess_data(data)
optimizer.zero_grad(set_to_none=True)
with torch.cuda.amp.autocast(
dtype=self.half_dtype, enabled=bool(self.scaler)
):
output = model(data)
print0(
f"after forward pass {torch.cuda.memory_allocated()/1024**3:.3} GB"
)
self.log(
"stats/fwd memory alloc",
torch.cuda.memory_allocated() / 1024**3,
)
self.log(
"stats/fwd memory reserved",
torch.cuda.memory_reserved() / 1024**3,
)
total_loss = self.calculate_loss(output)
if self.scaler:
self.scaler.scale(total_loss).backward()
self.scaler.step(optimizer)
self.scaler.update()
else:
total_loss.backward()
optimizer.step()
scheduler.step()
torch.cuda.synchronize()
t1 = time.time()
batch_time = t1 - t0
batch_size = self.config.training.batch_size * dist.get_world_size()
items_time = batch_size / (t1 - t0)
t0 = t1
self.log("stats/sec per batch", batch_time)
self.log("stats/items per sec", items_time)
total_loss = total_loss.detach()
dist.reduce(total_loss, dst=0)
if self.rank == 0:
norm_total_loss = total_loss.item() / dist.get_world_size()
print(
f"epoch: {self.epochs} step {self.steps} loss: {norm_total_loss:.4}"
)
self.log("train/loss", norm_total_loss)
self.log("stats/batch_size", batch_size)
iteration_times.append(batch_time)
cuda_info = torch.cuda.memory_stats()
print("cuda alloc retries ", cuda_info.get("num_alloc_retries", 0))
self.log(
"stats/max_gpu_allocated_gb",
torch.cuda.max_memory_allocated() / 1024**3,
)
# TODO implement imagenet eval
# TODO implement checkpoint saving
self.validate()
def validate(self):
if self.steps % self.config.training.validation_steps != 0 or self.steps == 0:
return
model = self.model
model.eval()
print0("evaluating")
validation_loader = self.datamodule.val_dataloader()
validation_loss = torch.Tensor([0]).to(self.device)
for data in validation_loader:
data = self.preprocess_data(data)
with torch.no_grad():
with torch.cuda.amp.autocast(
dtype=self.half_dtype, enabled=bool(self.scaler)
):
output = model(data)
total_loss = self.calculate_loss(output, validation=True)
validation_loss += total_loss.detach()
dist.reduce(validation_loss, dst=0)
norm_validation_loss = validation_loss.item() / dist.get_world_size()
print0(f"step {self.steps} EVAL loss: {norm_validation_loss:.4}")
def imagenet_validate(self):
print0("imagenet validation")
with torch.no_grad():
with torch.cuda.amp.autocast(
dtype=self.half_dtype, enabled=bool(self.scaler)
):
metrics = run_imagenet_zero_shot(
self.model,
self.imagenet_val_dataloader,
self.device,
self.imagenet_val_text_transform,
)
if metrics is not None:
for key in metrics:
self.log(
f"val/imagenet/{key}",
metrics[key],
always_log=True,
)
if __name__ == "__main__":
datasets.logging.set_verbosity_error() # too spammy
config: FLAVAArguments = build_config()
if config.training.enable_tf32:
enable_tf32()
trainer = Trainer(config)
trainer.train()
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/native/train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
import torchvision
from flava.data.transforms import (
default_image_pretraining_transforms,
default_text_transform,
default_torchvision_transforms,
encode_text_batch,
pad_batch,
TEXT_DEFAULT_TOKENIZER,
TEXT_WHOLE_WORD_MASK_TOKENIZER,
VL_MAX_LENGTH_DEFAULT,
VLTransform,
)
from flava.data.utils import build_datasets_from_info, fetch_images
from flava.definitions import HFDatasetInfo, TorchVisionDatasetInfo
from pytorch_lightning import LightningDataModule
from torch.utils.data.distributed import DistributedSampler
from transformers import (
BertTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
DefaultDataCollator,
TRANSFORMERS_CACHE,
)
from transformers.data.data_collator import torch_default_data_collator
def transform_image(transform, sample):
sample.update(transform(sample["image"]))
return sample
def get_sampler(dataset, shuffle=True):
if dist.is_initialized():
return DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
return torch.utils.data.RandomSampler(dataset)
return torch.utils.data.SequentialSampler(dataset)
class DataCollatorForWholeWordMaskRetainingBatch(DataCollatorForWholeWordMask):
def torch_call(
self, examples: List[Union[List[int], Any, Dict[str, Any]]]
) -> Dict[str, Any]:
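        """Apply whole-word masking via the parent collator, then collate the
        raw examples with the default collator and copy the masked
        ``input_ids`` and ``labels`` over, so every other key in the batch
        (e.g. image tensors) is retained."""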
masked_batch = super().torch_call(examples)
examples = torch_default_data_collator(examples)
examples["input_ids"] = masked_batch["input_ids"]
examples["labels"] = masked_batch["labels"]
return examples
class ImageDataModule(LightningDataModule):
def __init__(
self,
train_infos: List[HFDatasetInfo],
val_infos: Optional[List[HFDatasetInfo]] = None,
transforms: Optional[Tuple[Callable, Callable]] = None,
batch_size: int = 32,
num_workers: int = 4,
allow_uneven_batches: bool = False,
prefetch_factor: int = 2,
**kwargs: Any,
):
super().__init__()
self.train_dataset_infos = train_infos
self.val_dataset_infos = val_infos
if self.val_dataset_infos is None:
self.val_dataset_infos = train_infos
self.batch_size = batch_size
self.num_workers = num_workers
self.allow_uneven_batches = allow_uneven_batches
self.prefetch_factor = prefetch_factor
if transforms is None:
transforms = default_image_pretraining_transforms()
self.train_transform, self.test_transform = transforms
def setup(self, stage=None):
train_transform = partial(transform_image, self.train_transform)
val_transform = partial(transform_image, self.test_transform)
self.train_dataset = build_datasets_from_info(
self.train_dataset_infos, split="train"
)
self.train_dataset.set_transform(train_transform)
self.val_dataset = build_datasets_from_info(
self.val_dataset_infos, split="validation"
)
self.val_dataset.set_transform(val_transform)
def train_dataloader(self):
return torch.utils.data.DataLoader(
self.train_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
sampler=get_sampler(self.train_dataset, shuffle=True),
pin_memory=True,
persistent_workers=True,
prefetch_factor=self.prefetch_factor,
# uneven batches can cause distributed issues,
# drop last batch to prevent those.
# ideally, we don't need to drop these for unimodal cases
# but just to be safe
drop_last=True,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self.val_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
sampler=get_sampler(self.val_dataset, shuffle=False),
pin_memory=True,
persistent_workers=True,
prefetch_factor=self.prefetch_factor,
# uneven batches can cause distributed issues,
# drop last batch to prevent those.
# ideally, we don't need to drop these for unimodal cases
# but just to be safe
drop_last=True,
)
def test_dataloader(self):
return self.val_dataloader()
def on_before_batch_transfer(self, batch, *args):
if batch["label"].size(0) < self.batch_size and not self.allow_uneven_batches:
batch = pad_batch(batch, self.batch_size)
return batch
class TextDataModule(LightningDataModule):
def __init__(
self,
train_infos: List[HFDatasetInfo],
text_columns: List[str],
val_infos: Optional[List[HFDatasetInfo]] = None,
tokenizer: Optional[Callable] = None,
max_length: int = 512,
batch_size: int = 32,
num_workers: int = 4,
allow_uneven_batches: bool = False,
prefetch_factor: int = 2,
**kwargs: Any,
):
super().__init__()
self.train_dataset_infos = train_infos
self.text_columns = text_columns
self.val_dataset_infos = val_infos
if self.val_dataset_infos is None:
self.val_dataset_infos = train_infos
self.tokenizer = tokenizer
self.max_length = max_length
self.batch_size = batch_size
self.num_workers = num_workers
self.allow_uneven_batches = allow_uneven_batches
self.prefetch_factor = prefetch_factor
def setup(self, stage=None):
if self.tokenizer is None:
self.tokenizer = BertTokenizer.from_pretrained(TEXT_DEFAULT_TOKENIZER)
transform = partial(
encode_text_batch,
tokenizer=self.tokenizer,
padding="max_length",
max_length=self.max_length,
truncation=True,
return_tensors="pt",
return_special_tokens_mask=True,
text_columns=self.text_columns,
return_batch=True,
)
self.train_dataset = build_datasets_from_info(
self.train_dataset_infos, split="train"
)
self.train_dataset.set_transform(transform)
self.val_dataset = build_datasets_from_info(
self.val_dataset_infos, split="validation"
)
self.val_dataset.set_transform(transform)
def train_dataloader(self):
return self._build_dataloader(self.train_dataset)
def val_dataloader(self):
return self._build_dataloader(self.val_dataset, shuffle=False)
def _build_dataloader(self, dataset, drop_last=False, shuffle=True):
return torch.utils.data.DataLoader(
dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
sampler=get_sampler(dataset, shuffle),
pin_memory=True,
persistent_workers=True,
prefetch_factor=self.prefetch_factor,
collate_fn=self._build_collator(),
drop_last=drop_last,
)
def _build_collator(self):
return DefaultDataCollator()
def on_before_batch_transfer(self, batch, *args):
batch.pop("token_type_ids", None)
mask = batch.pop("attention_mask", None)
        if (
            mask is not None
            and mask.size(0) < self.batch_size
            and not self.allow_uneven_batches
        ):
batch = pad_batch(batch, self.batch_size)
return batch
def on_after_batch_transfer(self, batch, *args):
batch["text"] = batch.pop("input_ids")
return batch
class MLMDataModule(TextDataModule):
def __init__(
self,
train_infos: List[HFDatasetInfo],
text_columns: List[str],
val_infos: Optional[List[HFDatasetInfo]] = None,
mlm_probability: float = 0.15,
ignore_index: int = -1,
**kwargs: Any,
):
super().__init__(train_infos, text_columns, val_infos, **kwargs)
self.mlm_probability = mlm_probability
self.ignore_index = ignore_index
def setup(self, stage=None):
if self.tokenizer is None:
self.tokenizer = BertTokenizer.from_pretrained(TEXT_DEFAULT_TOKENIZER)
transform = partial(
encode_text_batch,
tokenizer=self.tokenizer,
padding="max_length",
max_length=self.max_length,
truncation=True,
return_tensors="pt",
return_special_tokens_mask=True,
text_columns=self.text_columns,
return_batch=False,
)
self.train_dataset = build_datasets_from_info(
self.train_dataset_infos, split="train"
)
self.train_dataset.set_transform(transform)
self.val_dataset = build_datasets_from_info(
self.val_dataset_infos, split="validation"
)
self.val_dataset.set_transform(transform)
def _build_dataloader(self, dataset, drop_last=True, shuffle=True):
# uneven batches can cause distributed issues,
# drop last batch to prevent those.
# ideally, we don't need to drop these for unimodal cases
# but just to be safe
return super()._build_dataloader(dataset, drop_last=drop_last, shuffle=shuffle)
def _build_collator(self):
return DataCollatorForLanguageModeling(
self.tokenizer, mlm_probability=self.mlm_probability
)
def on_after_batch_transfer(self, batch, *args):
batch["text_masked"] = batch.pop("input_ids")
batch["mlm_labels"] = batch.pop("labels")
batch["mlm_labels"][batch["mlm_labels"] == -100] = self.ignore_index
return batch
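def _example_mlm_label_remap(ignore_index: int = -1):
    # Hypothetical sketch, not part of the original file: the HuggingFace collator
    # marks unmasked positions with -100; on_after_batch_transfer above rewrites
    # them to the ignore_index expected by the FLAVA losses.
    mlm_labels = torch.tensor([[-100, 2054, -100, 3899]])
    mlm_labels[mlm_labels == -100] = ignore_index
    return mlm_labels  # tensor([[  -1, 2054,   -1, 3899]])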
class VLDataModule(LightningDataModule):
def __init__(
self,
train_infos: List[HFDatasetInfo],
val_infos: List[HFDatasetInfo],
text_transform: Optional[Callable] = None,
image_transforms: Optional[Tuple[Callable, Callable]] = None,
        mlm_probability: float = 0.15,
batch_size: int = 32,
num_workers: int = 4,
finetuning: bool = False,
ignore_index: int = -1,
itm_probability: float = 0.1,
allow_uneven_batches: bool = False,
fetch_num_threads: int = 4,
fetch_retries: int = 0,
fetch_sleep_timer: int = 0,
fetch_timeout: Optional[float] = None,
fetch_batch_size: int = 50,
        prefetch_factor: int = 2,
**kwargs,
):
super().__init__()
self.train_dataset_infos = train_infos
self.val_dataset_infos = val_infos
if self.val_dataset_infos is None:
self.val_dataset_infos = train_infos
if image_transforms is None:
if not finetuning:
image_transforms = default_image_pretraining_transforms()
else:
image_transforms = default_torchvision_transforms(use_dict=True)
self.train_image_transform, self.test_image_transform = image_transforms
self.text_transform = text_transform
        self.mlm_probability = mlm_probability
self.batch_size = batch_size
self.num_workers = num_workers
self.ignore_index = ignore_index
self.itm_probability = itm_probability
self.allow_uneven_batches = allow_uneven_batches
self.fetch_num_threads = fetch_num_threads
self.fetch_retries = fetch_retries
self.fetch_sleep_timer = fetch_sleep_timer
self.fetch_timeout = fetch_timeout
self.fetch_batch_size = fetch_batch_size
self.prefetch_factor = prefetch_factor
def setup(self, stage=None):
if self.text_transform is None:
# TODO Update to use whole word mask vocab
text_tokenizer = BertTokenizer.from_pretrained(
TEXT_WHOLE_WORD_MASK_TOKENIZER
)
self.text_transform = default_text_transform(
text_tokenizer, max_text_length=VL_MAX_LENGTH_DEFAULT
)
self.text_tokenizer = self.text_transform.keywords["tokenizer"]
train_vl_transform = VLTransform(
self.train_image_transform, self.text_transform
)
val_vl_transform = VLTransform(self.test_image_transform, self.text_transform)
train_dataset = build_datasets_from_info(
self.train_dataset_infos, split="train"
)
train_dataset = train_dataset.map(
fetch_images,
batched=True,
batch_size=self.fetch_batch_size,
fn_kwargs={
"num_threads": self.fetch_num_threads,
"timeout": self.fetch_timeout,
"retries": self.fetch_retries,
"sleep_timer": self.fetch_sleep_timer,
},
)
train_dataset = train_dataset.filter(
lambda example: example["image"] is not None
)
self.train_dataset = train_dataset
self.train_dataset.set_transform(
partial(
train_vl_transform,
dataset=train_dataset.filter(lambda example: True),
itm_probability=self.itm_probability,
)
)
val_dataset = build_datasets_from_info(
self.val_dataset_infos, split="validation"
)
val_dataset = val_dataset.map(
fetch_images,
batched=True,
batch_size=self.fetch_batch_size,
fn_kwargs={
"num_threads": self.fetch_num_threads,
"timeout": self.fetch_timeout,
"retries": self.fetch_retries,
"sleep_timer": self.fetch_sleep_timer,
},
)
val_dataset = val_dataset.filter(lambda example: example["image"] is not None)
self.val_dataset = val_dataset
self.val_dataset.set_transform(
partial(
val_vl_transform,
dataset=self.val_dataset.filter(
lambda example: True
), # Pass a copy to transform
itm_probability=self.itm_probability,
)
)
def train_dataloader(self):
return torch.utils.data.DataLoader(
self.train_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
sampler=get_sampler(self.train_dataset),
collate_fn=self._build_collator(),
pin_memory=True,
persistent_workers=True,
prefetch_factor=self.prefetch_factor,
# uneven batches can cause distributed issues,
# drop last batch to prevent those.
drop_last=True,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self.val_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
sampler=get_sampler(self.val_dataset, shuffle=False),
collate_fn=self._build_collator(),
pin_memory=True,
persistent_workers=True,
prefetch_factor=self.prefetch_factor,
# uneven batches can cause distributed issues,
# drop last batch to prevent those.
drop_last=True,
)
def _build_collator(self):
return DataCollatorForWholeWordMaskRetainingBatch(
self.text_tokenizer, mlm_probability=self.mlm_probability
)
def on_before_batch_transfer(self, batch, *args):
batch.pop("token_type_ids", None)
mask = batch.pop("attention_mask", None)
if (
mask is not None
and mask.size(0) < self.batch_size
and not self.allow_uneven_batches
):
batch = pad_batch(batch, self.batch_size)
return batch
def on_after_batch_transfer(self, batch, *args):
text_masked = batch.pop("input_ids")
mlm_labels = batch.pop("labels", None)
mlm_labels[mlm_labels == -100] = self.ignore_index
text = text_masked.detach().clone()
text[mlm_labels != -1] = mlm_labels[mlm_labels != -1]
batch.update(
{"mlm_labels": mlm_labels, "text": text, "text_masked": text_masked}
)
return batch
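def _example_reconstruct_text():
    # Hypothetical sketch, not part of the original file: VLDataModule's
    # on_after_batch_transfer rebuilds the unmasked "text" tensor by writing the
    # original token ids (stored in mlm_labels at masked positions) back into a
    # copy of "text_masked".
    text_masked = torch.tensor([[101, 103, 3899, 102]])  # 103 == [MASK]
    mlm_labels = torch.tensor([[-1, 1037, -1, -1]])  # original id at the masked slot
    text = text_masked.detach().clone()
    text[mlm_labels != -1] = mlm_labels[mlm_labels != -1]
    return text  # tensor([[ 101, 1037, 3899,  102]])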
class TorchVisionDataModule(LightningDataModule):
def __init__(
self,
train_infos: List[TorchVisionDatasetInfo],
# Val info is not used for torchvision datamodule, but kept to keep things consistent
val_infos: Optional[List[TorchVisionDatasetInfo]] = None,
dataset_root: Optional[str] = None,
image_transforms: Optional[Tuple[Callable, Callable]] = None,
batch_size: int = 32,
num_workers: int = 4,
prefetch_factor: int = 2,
**kwargs: Any,
):
super().__init__()
self.train_info = train_infos[0]
if val_infos is None:
val_infos = train_infos
self.val_info = val_infos[0]
self.train_class_ptr, self.train_root = self._parse_info(
self.train_info, dataset_root=dataset_root
)
self.val_class_ptr, self.val_root = self._parse_info(
self.val_info, dataset_root=dataset_root
)
if image_transforms is None:
image_transforms = default_torchvision_transforms()
self.train_transform, self.test_transform = image_transforms
self.batch_size = batch_size
self.num_workers = num_workers
self.prefetch_factor = prefetch_factor
def _parse_info(
self, info: TorchVisionDatasetInfo, dataset_root: Optional[str] = None
):
assert hasattr(
torchvision.datasets, info.key
), f"No dataset named {info.key} present in torchvision.datasets"
class_ptr = getattr(torchvision.datasets, info.key)
if dataset_root is None:
dataset_root = os.path.join(TRANSFORMERS_CACHE, "datasets", "torchvision")
dataset_root = os.path.join(dataset_root, class_ptr.__name__.lower())
os.makedirs(dataset_root, exist_ok=True)
return class_ptr, dataset_root
def setup(self, stage=None):
self.train_dataset = self.train_class_ptr(
self.train_root,
split=self.train_info.train_split,
transform=self.train_transform,
download=True,
)
if self.val_info.has_val:
self.val_dataset = self.val_class_ptr(
self.val_root,
split=self.val_info.val_split,
transform=self.test_transform,
download=True,
)
self.test_dataset = self.val_class_ptr(
self.val_root,
split=self.val_info.test_split,
transform=self.test_transform,
download=True,
)
def train_dataloader(self):
return self._build_dataloader(self.train_dataset)
def val_dataloader(self):
if self.val_info.has_val:
dataset = self.val_dataset
else:
dataset = self.test_dataset
return self._build_dataloader(dataset, shuffle=False)
def test_dataloader(self):
return self._build_dataloader(self.test_dataset, shuffle=False)
def _build_dataloader(self, dataset: torch.utils.data.Dataset, shuffle=True):
return torch.utils.data.DataLoader(
dataset,
sampler=get_sampler(dataset, shuffle),
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True,
persistent_workers=True,
prefetch_factor=self.prefetch_factor,
)
def on_before_batch_transfer(self, batch, *args):
images, targets = batch
batch = {"image": images, "labels": targets}
return batch
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/native/data.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
from flava.data import default_text_transform, VL_MAX_LENGTH_DEFAULT
from flava.data.imagenet_zeroshot_data import (
imagenet_classnames,
openai_imagenet_template,
)
from pytorch_lightning import Callback, LightningDataModule
from pytorch_lightning.utilities import rank_zero_only
from tqdm import tqdm
logger = logging.getLogger(__name__)
def _zero_shot_classifier(model, device, text_transform, *args, **kwargs):
zeroshot_weights = []
for classname in tqdm(imagenet_classnames):
texts = text_transform(
[template(classname) for template in openai_imagenet_template]
)["input_ids"]
texts = texts.to(device)
class_embeddings = model.encode_text(texts)
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(device)
return zeroshot_weights
def _accuracy(output, target, topk=(1,)):
pred = output.topk(max(topk), 1, True, True)[1].t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
return [
float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy())
for k in topk
]
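def _example_accuracy():
    # Hypothetical sketch, not part of the original file: _accuracy counts how many
    # targets fall inside the top-k predictions per row; here both rows are top-1
    # correct, so acc1 == acc2 == 2.0 before normalizing by the number of samples.
    logits = torch.tensor([[0.1, 0.9, 0.0], [0.8, 0.1, 0.1]])
    target = torch.tensor([1, 0])
    acc1, acc2 = _accuracy(logits, target, topk=(1, 2))
    return acc1, acc2  # (2.0, 2.0)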
@rank_zero_only
def run_imagenet_zero_shot(model, dataloader, device, text_transform, *args, **kwargs):
logger.info("Starting ImageNet Zero-Shot Eval")
logger.info("Building classifier")
classifier = _zero_shot_classifier(model, device, text_transform)
logger.info("Classifier built")
top1, top5, n = 0.0, 0.0, 0.0
for sample in tqdm(dataloader):
images = sample["image"]
target = sample["label"]
images = images.to(device)
target = target.to(device)
# predict
# if hasattr(model, "module"):
# image_features = model.module.encode_image({"image": images})
# else:
image_features = model.encode_image(images)
image_features /= image_features.norm(dim=-1, keepdim=True)
logits = 100.0 * image_features @ classifier
# measure accuracy
acc1, acc5 = _accuracy(logits, target, topk=(1, 5))
top1 += acc1
top5 += acc5
n += images.size(0)
top1 = top1 / n
top5 = top5 / n
results = {}
results["imagenet-zeroshot-val-top1"] = top1
results["imagenet-zeroshot-val-top5"] = top5
return results
class MultimodalEvalCallback(Callback):
def __init__(self, imagenet_datamodule: LightningDataModule, *args, **kwargs):
super().__init__()
self.imagenet_val_dataloader = imagenet_datamodule.val_dataloader()
self.text_transform = default_text_transform(
max_text_length=VL_MAX_LENGTH_DEFAULT
)
@torch.no_grad()
def on_validation_start(self, trainer, pl_module, **kwargs) -> None:
metrics = run_imagenet_zero_shot(
pl_module.model,
self.imagenet_val_dataloader,
pl_module.device,
self.text_transform,
)
if metrics is not None:
for key in metrics:
                pl_module.log(
f"val/{key}",
metrics[key],
prog_bar=True,
logger=True,
rank_zero_only=True,
)
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/callbacks/multimodal_eval.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .multimodal_eval import * # noqa F401
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/callbacks/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
from functools import partial
from typing import Any, Callable, Optional
import torch
from torchmultimodal.transforms.flava_transform import FLAVAImageTransform
from torchvision import transforms
from transformers import BertTokenizer
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
IMAGE_DEFAULT_SIZE = (224, 224)
VL_MAX_LENGTH_DEFAULT = 77
TEXT_MAX_LENGTH_DEFAULT = 512
TEXT_DEFAULT_TOKENIZER = "bert-base-uncased"
TEXT_WHOLE_WORD_MASK_TOKENIZER = "bert-large-uncased-whole-word-masking"
def encode_text(text, tokenizer, *args, **kwargs):
return tokenizer(text, *args, **kwargs)
def encode_text_batch(
batch, tokenizer, text_columns, return_batch=False, *args, **kwargs
):
texts = [batch[column] for column in text_columns]
tokens = tokenizer(*texts, *args, **kwargs)
if return_batch:
batch.update(tokens)
return batch
return tokens
def transform_image_dict(transform, image, *args, **kwargs):
    # Wrap a plain torchvision transform so that, like the FLAVA transforms, it
    # takes a raw image and returns a dict that can be merged into a sample.
    return {"image": transform(image, *args, **kwargs)}
def default_torchvision_transforms(
size=IMAGE_DEFAULT_SIZE,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
use_dict=False,
):
transform = transforms.Compose(
[
transforms.Resize(size),
transforms.ToTensor(),
transforms.Normalize(
mean=mean,
std=std,
),
]
)
if use_dict:
        transform = partial(transform_image_dict, transform)
return transform, transform
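def _example_default_transforms():
    # Hypothetical sketch, not part of the original file: both entries of the
    # returned pair are the same resize/normalize pipeline, producing a 3x224x224
    # tensor; with use_dict=True the callable instead returns {"image": tensor}.
    from PIL import Image
    train_transform, _eval_transform = default_torchvision_transforms()
    return train_transform(Image.new("RGB", (64, 64))).shape  # torch.Size([3, 224, 224])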
def default_image_pretraining_transforms():
return FLAVAImageTransform(), FLAVAImageTransform(is_train=False)
def default_text_transform(
text_tokenizer: Optional[Callable] = None,
max_text_length: int = TEXT_MAX_LENGTH_DEFAULT,
**kwargs: Any,
):
if text_tokenizer is None:
text_tokenizer = BertTokenizer.from_pretrained(TEXT_DEFAULT_TOKENIZER)
text_transform = partial(
encode_text,
tokenizer=text_tokenizer,
padding="max_length",
max_length=max_text_length,
truncation=True,
return_tensors="pt",
return_special_tokens_mask=True,
)
return text_transform
def default_vl_text_transform(
text_tokenizer: Optional[Callable] = None,
max_text_length: int = VL_MAX_LENGTH_DEFAULT,
**kwargs: Any,
):
if text_tokenizer is None:
text_tokenizer = BertTokenizer.from_pretrained(TEXT_WHOLE_WORD_MASK_TOKENIZER)
return default_text_transform(text_tokenizer, max_text_length=max_text_length)
def pad_batch(batch, batch_size):
for item in batch.keys():
if isinstance(batch[item], torch.Tensor):
diff = batch_size - batch[item].size(0)
pad = batch[item][-diff:].detach().clone()
batch[item] = torch.cat([batch[item], pad], dim=0)
return batch
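def _example_pad_batch():
    # Hypothetical sketch, not part of the original file: pad_batch repeats the
    # trailing rows of every tensor so an undersized final batch reaches the
    # configured batch size.
    batch = {
        "text": torch.tensor([[1, 2], [3, 4], [5, 6]]),
        "label": torch.tensor([0, 1, 1]),
    }
    padded = pad_batch(batch, batch_size=4)
    return padded["text"].shape  # torch.Size([4, 2])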
class VLTransform:
def __init__(self, image_transform, text_transform):
self.image_transform = image_transform
self.text_transform = text_transform
def __call__(self, info, dataset, itm_probability):
output = {}
text = info["text"]
image = info["image"]
if itm_probability > 0:
output["itm_labels"] = torch.ones((1), dtype=torch.long)
if random.random() < itm_probability:
while text == info["text"]:
text = dataset.select([random.randint(0, len(dataset) - 1)])[0]["text"]
output["itm_labels"] = torch.zeros((1), dtype=torch.long)
output.update(self.image_transform(image))
output.update(self.text_transform(text))
return output
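class _ToyCaptionDataset:
    # Hypothetical stand-in, not part of the original file: just enough of the
    # HuggingFace dataset API (len / indexing / select) for VLTransform's ITM
    # negative sampling.
    def __init__(self, captions):
        self._rows = [{"text": caption} for caption in captions]
    def __len__(self):
        return len(self._rows)
    def __getitem__(self, idx):
        return self._rows[idx]
    def select(self, indices):
        return _ToyCaptionDataset([self._rows[i]["text"] for i in indices])
def _example_vl_transform(image_transform, text_transform):
    # Hypothetical sketch, not part of the original file: with itm_probability=1.0
    # the caption is always swapped for a different one from the dataset and
    # itm_labels becomes 0. Both transforms are assumed to return dicts, as the
    # FLAVA image transform and default_text_transform above do.
    from PIL import Image
    vl_transform = VLTransform(image_transform, text_transform)
    info = {"image": Image.new("RGB", (32, 32)), "text": "a dog"}
    dataset = _ToyCaptionDataset(["a dog", "a cat", "a bird"])
    return vl_transform(info, dataset=dataset, itm_probability=1.0)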
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/data/transforms.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .transforms import * # noqa F401
from .utils import * # noqa F401
from .imagenet_zeroshot_data import * # noqa F401
from .datamodules import * # noqa F401
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/data/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import List
import requests
from datasets import concatenate_datasets, load_dataset
from datasets.utils.file_utils import get_datasets_user_agent
from flava.definitions import HFDatasetInfo
from PIL import Image, UnidentifiedImageError
DATASETS_USER_AGENT = get_datasets_user_agent()
def build_datasets_from_info(dataset_infos: List[HFDatasetInfo], split: str = "train"):
dataset_list = []
for dataset_info in dataset_infos:
current_dataset = load_dataset(
dataset_info.key,
dataset_info.subset,
split=dataset_info.split_key_mapping[split],
use_auth_token=True,
**dataset_info.extra_kwargs,
)
if dataset_info.remove_columns is not None:
current_dataset = current_dataset.remove_columns(
dataset_info.remove_columns
)
if dataset_info.rename_columns is not None:
for rename in dataset_info.rename_columns:
current_dataset = current_dataset.rename_column(rename[0], rename[1])
dataset_list.append(current_dataset)
return concatenate_datasets(dataset_list)
def fetch_single_image(image_url, timeout, retries=0, sleep_timer=0):
for _ in range(retries + 1):
try:
image = Image.open(
requests.get(
image_url,
stream=True,
headers={"user-agent": DATASETS_USER_AGENT},
timeout=timeout,
).raw
)
break
except (requests.exceptions.ConnectionError, UnidentifiedImageError):
image = None
time.sleep(sleep_timer)
return image
def fetch_images(batch, num_threads, timeout=None, retries=0, sleep_timer=0):
if "image" in batch:
# This dataset already has "image" defined.
return batch
with ThreadPoolExecutor(max_workers=num_threads) as executor:
batch["image"] = list(
executor.map(
partial(
fetch_single_image,
timeout=timeout,
retries=retries,
sleep_timer=sleep_timer,
),
batch["image_url"],
)
)
return batch
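def _example_attach_images(dataset):
    # Hypothetical sketch, not part of the original file; mirrors how
    # VLDataModule.setup uses fetch_images: download an "image" column from
    # "image_url" in parallel, then drop rows whose download failed.
    dataset = dataset.map(
        fetch_images,
        batched=True,
        batch_size=50,
        fn_kwargs={"num_threads": 4, "timeout": 5.0, "retries": 1, "sleep_timer": 0},
    )
    return dataset.filter(lambda example: example["image"] is not None)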
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/data/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torchvision
from flava.definitions import HFDatasetInfo, TorchVisionDatasetInfo
from pytorch_lightning import LightningDataModule
from transformers import (
BertTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
DefaultDataCollator,
TRANSFORMERS_CACHE,
)
from transformers.data.data_collator import torch_default_data_collator
from .transforms import (
default_image_pretraining_transforms,
default_text_transform,
default_torchvision_transforms,
encode_text_batch,
pad_batch,
TEXT_DEFAULT_TOKENIZER,
TEXT_WHOLE_WORD_MASK_TOKENIZER,
VL_MAX_LENGTH_DEFAULT,
VLTransform,
)
from .utils import build_datasets_from_info, fetch_images
def transform_image(transform, sample):
sample.update(transform(sample["image"]))
return sample
class DataCollatorForWholeWordMaskRetainingBatch(DataCollatorForWholeWordMask):
def torch_call(
self, examples: List[Union[List[int], Any, Dict[str, Any]]]
) -> Dict[str, Any]:
masked_batch = super().torch_call(examples)
examples = torch_default_data_collator(examples)
examples["input_ids"] = masked_batch["input_ids"]
examples["labels"] = masked_batch["labels"]
return examples
class ImageDataModule(LightningDataModule):
def __init__(
self,
train_infos: List[HFDatasetInfo],
val_infos: Optional[List[HFDatasetInfo]] = None,
transforms: Optional[Tuple[Callable, Callable]] = None,
batch_size: int = 32,
num_workers: int = 4,
allow_uneven_batches: bool = False,
**kwargs: Any,
):
super().__init__()
self.train_dataset_infos = train_infos
self.val_dataset_infos = val_infos
if self.val_dataset_infos is None:
self.val_dataset_infos = train_infos
self.batch_size = batch_size
self.num_workers = num_workers
self.allow_uneven_batches = allow_uneven_batches
if transforms is None:
transforms = default_image_pretraining_transforms()
self.train_transform, self.test_transform = transforms
def setup(self, stage=None):
train_transform = partial(transform_image, self.train_transform)
val_transform = partial(transform_image, self.test_transform)
self.train_dataset = build_datasets_from_info(
self.train_dataset_infos, split="train"
)
self.train_dataset.set_transform(train_transform)
self.val_dataset = build_datasets_from_info(
self.val_dataset_infos, split="validation"
)
self.val_dataset.set_transform(val_transform)
def train_dataloader(self):
return torch.utils.data.DataLoader(
self.train_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
sampler=None,
shuffle=True,
# uneven batches can cause distributed issues,
# drop last batch to prevent those.
# ideally, we don't need to drop these for unimodal cases
# but just to be safe
drop_last=True,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self.val_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
sampler=None,
shuffle=False,
# uneven batches can cause distributed issues,
# drop last batch to prevent those.
# ideally, we don't need to drop these for unimodal cases
# but just to be safe
drop_last=True,
)
def test_dataloader(self):
return self.val_dataloader()
def on_before_batch_transfer(self, batch, *args):
if batch["label"].size(0) < self.batch_size and not self.allow_uneven_batches:
batch = pad_batch(batch, self.batch_size)
return batch
class TextDataModule(LightningDataModule):
def __init__(
self,
train_infos: List[HFDatasetInfo],
text_columns: List[str],
val_infos: Optional[List[HFDatasetInfo]] = None,
tokenizer: Optional[Callable] = None,
max_length: int = 512,
batch_size: int = 32,
num_workers: int = 4,
allow_uneven_batches: bool = False,
**kwargs: Any,
):
super().__init__()
self.train_dataset_infos = train_infos
self.text_columns = text_columns
self.val_dataset_infos = val_infos
if self.val_dataset_infos is None:
self.val_dataset_infos = train_infos
self.tokenizer = tokenizer
self.max_length = max_length
self.batch_size = batch_size
self.num_workers = num_workers
self.allow_uneven_batches = allow_uneven_batches
def setup(self, stage=None):
if self.tokenizer is None:
self.tokenizer = BertTokenizer.from_pretrained(TEXT_DEFAULT_TOKENIZER)
transform = partial(
encode_text_batch,
tokenizer=self.tokenizer,
padding="max_length",
max_length=self.max_length,
truncation=True,
return_tensors="pt",
return_special_tokens_mask=True,
text_columns=self.text_columns,
return_batch=True,
)
self.train_dataset = build_datasets_from_info(
self.train_dataset_infos, split="train"
)
self.train_dataset.set_transform(transform)
self.val_dataset = build_datasets_from_info(
self.val_dataset_infos, split="validation"
)
self.val_dataset.set_transform(transform)
def train_dataloader(self):
return self._build_dataloader(self.train_dataset)
def val_dataloader(self):
return self._build_dataloader(self.val_dataset, shuffle=False)
def _build_dataloader(self, dataset, drop_last=False, shuffle=True):
return torch.utils.data.DataLoader(
dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
sampler=None,
shuffle=shuffle,
collate_fn=self._build_collator(),
drop_last=drop_last,
)
def _build_collator(self):
return DefaultDataCollator()
def on_before_batch_transfer(self, batch, *args):
batch.pop("token_type_ids", None)
mask = batch.pop("attention_mask", None)
        if (
            mask is not None
            and mask.size(0) < self.batch_size
            and not self.allow_uneven_batches
        ):
batch = pad_batch(batch, self.batch_size)
return batch
def on_after_batch_transfer(self, batch, *args):
batch["text"] = batch.pop("input_ids")
return batch
class MLMDataModule(TextDataModule):
def __init__(
self,
train_infos: List[HFDatasetInfo],
text_columns: List[str],
val_infos: Optional[List[HFDatasetInfo]] = None,
mlm_probability: float = 0.15,
ignore_index: int = -1,
**kwargs: Any,
):
super().__init__(train_infos, text_columns, val_infos, **kwargs)
self.mlm_probability = mlm_probability
self.ignore_index = ignore_index
def setup(self, stage=None):
if self.tokenizer is None:
self.tokenizer = BertTokenizer.from_pretrained(TEXT_DEFAULT_TOKENIZER)
transform = partial(
encode_text_batch,
tokenizer=self.tokenizer,
padding="max_length",
max_length=self.max_length,
truncation=True,
return_tensors="pt",
return_special_tokens_mask=True,
text_columns=self.text_columns,
return_batch=False,
)
self.train_dataset = build_datasets_from_info(
self.train_dataset_infos, split="train"
)
self.train_dataset.set_transform(transform)
self.val_dataset = build_datasets_from_info(
self.val_dataset_infos, split="validation"
)
self.val_dataset.set_transform(transform)
def _build_dataloader(self, dataset, drop_last=True, shuffle=True):
# uneven batches can cause distributed issues,
# drop last batch to prevent those.
# ideally, we don't need to drop these for unimodal cases
# but just to be safe
return super()._build_dataloader(dataset, drop_last=drop_last, shuffle=shuffle)
def _build_collator(self):
return DataCollatorForLanguageModeling(
self.tokenizer, mlm_probability=self.mlm_probability
)
def on_after_batch_transfer(self, batch, *args):
batch["text_masked"] = batch.pop("input_ids")
batch["mlm_labels"] = batch.pop("labels")
batch["mlm_labels"][batch["mlm_labels"] == -100] = self.ignore_index
return batch
class VLDataModule(LightningDataModule):
def __init__(
self,
train_infos: List[HFDatasetInfo],
val_infos: List[HFDatasetInfo],
text_transform: Optional[Callable] = None,
image_transforms: Optional[Tuple[Callable, Callable]] = None,
        mlm_probability: float = 0.15,
batch_size: int = 32,
num_workers: int = 4,
finetuning: bool = False,
ignore_index: int = -1,
itm_probability: float = 0.1,
allow_uneven_batches: bool = False,
fetch_num_threads: int = 4,
fetch_retries: int = 0,
fetch_sleep_timer: int = 0,
fetch_timeout: Optional[float] = None,
fetch_batch_size: int = 50,
**kwargs,
):
super().__init__()
self.train_dataset_infos = train_infos
self.val_dataset_infos = val_infos
if self.val_dataset_infos is None:
self.val_dataset_infos = train_infos
if image_transforms is None:
if not finetuning:
image_transforms = default_image_pretraining_transforms()
else:
image_transforms = default_torchvision_transforms(use_dict=True)
self.train_image_transform, self.test_image_transform = image_transforms
self.text_transform = text_transform
        self.mlm_probability = mlm_probability
self.batch_size = batch_size
self.num_workers = num_workers
self.ignore_index = ignore_index
self.itm_probability = itm_probability
self.allow_uneven_batches = allow_uneven_batches
self.fetch_num_threads = fetch_num_threads
self.fetch_retries = fetch_retries
self.fetch_sleep_timer = fetch_sleep_timer
self.fetch_timeout = fetch_timeout
self.fetch_batch_size = fetch_batch_size
def setup(self, stage=None):
if self.text_transform is None:
# TODO Update to use whole word mask vocab
text_tokenizer = BertTokenizer.from_pretrained(
TEXT_WHOLE_WORD_MASK_TOKENIZER
)
self.text_transform = default_text_transform(
text_tokenizer, max_text_length=VL_MAX_LENGTH_DEFAULT
)
self.text_tokenizer = self.text_transform.keywords["tokenizer"]
train_vl_transform = VLTransform(
self.train_image_transform, self.text_transform
)
val_vl_transform = VLTransform(self.test_image_transform, self.text_transform)
train_dataset = build_datasets_from_info(
self.train_dataset_infos, split="train"
)
train_dataset = train_dataset.map(
fetch_images,
batched=True,
batch_size=self.fetch_batch_size,
fn_kwargs={
"num_threads": self.fetch_num_threads,
"timeout": self.fetch_timeout,
"retries": self.fetch_retries,
"sleep_timer": self.fetch_sleep_timer,
},
)
train_dataset = train_dataset.filter(
lambda example: example["image"] is not None
)
self.train_dataset = train_dataset
self.train_dataset.set_transform(
partial(
train_vl_transform,
dataset=train_dataset.filter(lambda example: True),
itm_probability=self.itm_probability,
)
)
val_dataset = build_datasets_from_info(
self.val_dataset_infos, split="validation"
)
val_dataset = val_dataset.map(
fetch_images,
batched=True,
batch_size=self.fetch_batch_size,
fn_kwargs={
"num_threads": self.fetch_num_threads,
"timeout": self.fetch_timeout,
"retries": self.fetch_retries,
"sleep_timer": self.fetch_sleep_timer,
},
)
val_dataset = val_dataset.filter(lambda example: example["image"] is not None)
self.val_dataset = val_dataset
self.val_dataset.set_transform(
partial(
val_vl_transform,
dataset=self.val_dataset.filter(
lambda example: True
), # Pass a copy to transform
itm_probability=self.itm_probability,
)
)
def train_dataloader(self):
return torch.utils.data.DataLoader(
self.train_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
sampler=None,
shuffle=True,
collate_fn=self._build_collator(),
# uneven batches can cause distributed issues,
# drop last batch to prevent those.
drop_last=True,
)
def val_dataloader(self):
return torch.utils.data.DataLoader(
self.val_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
sampler=None,
shuffle=False,
collate_fn=self._build_collator(),
# uneven batches can cause distributed issues,
# drop last batch to prevent those.
drop_last=True,
)
def _build_collator(self):
return DataCollatorForWholeWordMaskRetainingBatch(
self.text_tokenizer, mlm_probability=self.mlm_probability
)
def on_before_batch_transfer(self, batch, *args):
batch.pop("token_type_ids", None)
mask = batch.pop("attention_mask", None)
if (
mask is not None
and mask.size(0) < self.batch_size
and not self.allow_uneven_batches
):
batch = pad_batch(batch, self.batch_size)
return batch
def on_after_batch_transfer(self, batch, *args):
text_masked = batch.pop("input_ids")
mlm_labels = batch.pop("labels", None)
mlm_labels[mlm_labels == -100] = self.ignore_index
text = text_masked.detach().clone()
text[mlm_labels != -1] = mlm_labels[mlm_labels != -1]
batch.update(
{"mlm_labels": mlm_labels, "text": text, "text_masked": text_masked}
)
return batch
class TorchVisionDataModule(LightningDataModule):
def __init__(
self,
train_infos: List[TorchVisionDatasetInfo],
# Val info is not used for torchvision datamodule, but kept to keep things consistent
val_infos: Optional[List[TorchVisionDatasetInfo]] = None,
dataset_root: Optional[str] = None,
image_transforms: Optional[Tuple[Callable, Callable]] = None,
batch_size: int = 32,
num_workers: int = 4,
**kwargs: Any,
):
super().__init__()
self.train_info = train_infos[0]
if val_infos is None:
val_infos = train_infos
self.val_info = val_infos[0]
self.train_class_ptr, self.train_root = self._parse_info(
self.train_info, dataset_root=dataset_root
)
self.val_class_ptr, self.val_root = self._parse_info(
self.val_info, dataset_root=dataset_root
)
if image_transforms is None:
image_transforms = default_torchvision_transforms()
self.train_transform, self.test_transform = image_transforms
self.batch_size = batch_size
self.num_workers = num_workers
def _parse_info(
self, info: TorchVisionDatasetInfo, dataset_root: Optional[str] = None
):
assert hasattr(
torchvision.datasets, info.key
), f"No dataset named {info.key} present in torchvision.datasets"
class_ptr = getattr(torchvision.datasets, info.key)
if dataset_root is None:
dataset_root = os.path.join(TRANSFORMERS_CACHE, "datasets", "torchvision")
dataset_root = os.path.join(dataset_root, class_ptr.__name__.lower())
os.makedirs(dataset_root, exist_ok=True)
return class_ptr, dataset_root
def setup(self, stage=None):
self.train_dataset = self.train_class_ptr(
self.train_root,
split=self.train_info.train_split,
transform=self.train_transform,
download=True,
)
if self.val_info.has_val:
self.val_dataset = self.val_class_ptr(
self.val_root,
split=self.val_info.val_split,
transform=self.test_transform,
download=True,
)
self.test_dataset = self.val_class_ptr(
self.val_root,
split=self.val_info.test_split,
transform=self.test_transform,
download=True,
)
def train_dataloader(self):
return self._build_dataloader(self.train_dataset)
def val_dataloader(self):
if self.val_info.has_val:
dataset = self.val_dataset
else:
dataset = self.test_dataset
return self._build_dataloader(dataset, shuffle=False)
def test_dataloader(self):
return self._build_dataloader(self.test_dataset, shuffle=False)
def _build_dataloader(self, dataset: torch.utils.data.Dataset, shuffle=True):
return torch.utils.data.DataLoader(
dataset,
shuffle=shuffle,
batch_size=self.batch_size,
num_workers=self.num_workers,
)
def on_before_batch_transfer(self, batch, *args):
images, targets = batch
batch = {"image": images, "labels": targets}
return batch
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/data/datamodules.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# File taken from https://github.com/mlfoundations/open_clip/
imagenet_classnames = [
"tench",
"goldfish",
"great white shark",
"tiger shark",
"hammerhead shark",
"electric ray",
"stingray",
"rooster",
"hen",
"ostrich",
"brambling",
"goldfinch",
"house finch",
"junco",
"indigo bunting",
"American robin",
"bulbul",
"jay",
"magpie",
"chickadee",
"American dipper",
"kite (bird of prey)",
"bald eagle",
"vulture",
"great grey owl",
"fire salamander",
"smooth newt",
"newt",
"spotted salamander",
"axolotl",
"American bullfrog",
"tree frog",
"tailed frog",
"loggerhead sea turtle",
"leatherback sea turtle",
"mud turtle",
"terrapin",
"box turtle",
"banded gecko",
"green iguana",
"Carolina anole",
"desert grassland whiptail lizard",
"agama",
"frilled-necked lizard",
"alligator lizard",
"Gila monster",
"European green lizard",
"chameleon",
"Komodo dragon",
"Nile crocodile",
"American alligator",
"triceratops",
"worm snake",
"ring-necked snake",
"eastern hog-nosed snake",
"smooth green snake",
"kingsnake",
"garter snake",
"water snake",
"vine snake",
"night snake",
"boa constrictor",
"African rock python",
"Indian cobra",
"green mamba",
"sea snake",
"Saharan horned viper",
"eastern diamondback rattlesnake",
"sidewinder rattlesnake",
"trilobite",
"harvestman",
"scorpion",
"yellow garden spider",
"barn spider",
"European garden spider",
"southern black widow",
"tarantula",
"wolf spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse",
"prairie grouse",
"peafowl",
"quail",
"partridge",
"african grey parrot",
"macaw",
"sulphur-crested cockatoo",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"duck",
"red-breasted merganser",
"goose",
"black swan",
"tusker",
"echidna",
"platypus",
"wallaby",
"koala",
"wombat",
"jellyfish",
"sea anemone",
"brain coral",
"flatworm",
"nematode",
"conch",
"snail",
"slug",
"sea slug",
"chiton",
"chambered nautilus",
"Dungeness crab",
"rock crab",
"fiddler crab",
"red king crab",
"American lobster",
"spiny lobster",
"crayfish",
"hermit crab",
"isopod",
"white stork",
"black stork",
"spoonbill",
"flamingo",
"little blue heron",
"great egret",
"bittern bird",
"crane bird",
"limpkin",
"common gallinule",
"American coot",
"bustard",
"ruddy turnstone",
"dunlin",
"common redshank",
"dowitcher",
"oystercatcher",
"pelican",
"king penguin",
"albatross",
"grey whale",
"killer whale",
"dugong",
"sea lion",
"Chihuahua",
"Japanese Chin",
"Maltese",
"Pekingese",
"Shih Tzu",
"King Charles Spaniel",
"Papillon",
"toy terrier",
"Rhodesian Ridgeback",
"Afghan Hound",
"Basset Hound",
"Beagle",
"Bloodhound",
"Bluetick Coonhound",
"Black and Tan Coonhound",
"Treeing Walker Coonhound",
"English foxhound",
"Redbone Coonhound",
"borzoi",
"Irish Wolfhound",
"Italian Greyhound",
"Whippet",
"Ibizan Hound",
"Norwegian Elkhound",
"Otterhound",
"Saluki",
"Scottish Deerhound",
"Weimaraner",
"Staffordshire Bull Terrier",
"American Staffordshire Terrier",
"Bedlington Terrier",
"Border Terrier",
"Kerry Blue Terrier",
"Irish Terrier",
"Norfolk Terrier",
"Norwich Terrier",
"Yorkshire Terrier",
"Wire Fox Terrier",
"Lakeland Terrier",
"Sealyham Terrier",
"Airedale Terrier",
"Cairn Terrier",
"Australian Terrier",
"Dandie Dinmont Terrier",
"Boston Terrier",
"Miniature Schnauzer",
"Giant Schnauzer",
"Standard Schnauzer",
"Scottish Terrier",
"Tibetan Terrier",
"Australian Silky Terrier",
"Soft-coated Wheaten Terrier",
"West Highland White Terrier",
"Lhasa Apso",
"Flat-Coated Retriever",
"Curly-coated Retriever",
"Golden Retriever",
"Labrador Retriever",
"Chesapeake Bay Retriever",
"German Shorthaired Pointer",
"Vizsla",
"English Setter",
"Irish Setter",
"Gordon Setter",
"Brittany dog",
"Clumber Spaniel",
"English Springer Spaniel",
"Welsh Springer Spaniel",
"Cocker Spaniel",
"Sussex Spaniel",
"Irish Water Spaniel",
"Kuvasz",
"Schipperke",
"Groenendael dog",
"Malinois",
"Briard",
"Australian Kelpie",
"Komondor",
"Old English Sheepdog",
"Shetland Sheepdog",
"collie",
"Border Collie",
"Bouvier des Flandres dog",
"Rottweiler",
"German Shepherd Dog",
"Dobermann",
"Miniature Pinscher",
"Greater Swiss Mountain Dog",
"Bernese Mountain Dog",
"Appenzeller Sennenhund",
"Entlebucher Sennenhund",
"Boxer",
"Bullmastiff",
"Tibetan Mastiff",
"French Bulldog",
"Great Dane",
"St. Bernard",
"husky",
"Alaskan Malamute",
"Siberian Husky",
"Dalmatian",
"Affenpinscher",
"Basenji",
"pug",
"Leonberger",
"Newfoundland dog",
"Great Pyrenees dog",
"Samoyed",
"Pomeranian",
"Chow Chow",
"Keeshond",
"brussels griffon",
"Pembroke Welsh Corgi",
"Cardigan Welsh Corgi",
"Toy Poodle",
"Miniature Poodle",
"Standard Poodle",
"Mexican hairless dog (xoloitzcuintli)",
"grey wolf",
"Alaskan tundra wolf",
"red wolf or maned wolf",
"coyote",
"dingo",
"dhole",
"African wild dog",
"hyena",
"red fox",
"kit fox",
"Arctic fox",
"grey fox",
"tabby cat",
"tiger cat",
"Persian cat",
"Siamese cat",
"Egyptian Mau",
"cougar",
"lynx",
"leopard",
"snow leopard",
"jaguar",
"lion",
"tiger",
"cheetah",
"brown bear",
"American black bear",
"polar bear",
"sloth bear",
"mongoose",
"meerkat",
"tiger beetle",
"ladybug",
"ground beetle",
"longhorn beetle",
"leaf beetle",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant",
"grasshopper",
"cricket insect",
"stick insect",
"cockroach",
"praying mantis",
"cicada",
"leafhopper",
"lacewing",
"dragonfly",
"damselfly",
"red admiral butterfly",
"ringlet butterfly",
"monarch butterfly",
"small white butterfly",
"sulphur butterfly",
"gossamer-winged butterfly",
"starfish",
"sea urchin",
"sea cucumber",
"cottontail rabbit",
"hare",
"Angora rabbit",
"hamster",
"porcupine",
"fox squirrel",
"marmot",
"beaver",
"guinea pig",
"common sorrel horse",
"zebra",
"pig",
"wild boar",
"warthog",
"hippopotamus",
"ox",
"water buffalo",
"bison",
"ram (adult male sheep)",
"bighorn sheep",
"Alpine ibex",
"hartebeest",
"impala (antelope)",
"gazelle",
"arabian camel",
"llama",
"weasel",
"mink",
"European polecat",
"black-footed ferret",
"otter",
"skunk",
"badger",
"armadillo",
"three-toed sloth",
"orangutan",
"gorilla",
"chimpanzee",
"gibbon",
"siamang",
"guenon",
"patas monkey",
"baboon",
"macaque",
"langur",
"black-and-white colobus",
"proboscis monkey",
"marmoset",
"white-headed capuchin",
"howler monkey",
"titi monkey",
"Geoffroy's spider monkey",
"common squirrel monkey",
"ring-tailed lemur",
"indri",
"Asian elephant",
"African bush elephant",
"red panda",
"giant panda",
"snoek fish",
"eel",
"silver salmon",
"rock beauty fish",
"clownfish",
"sturgeon",
"gar fish",
"lionfish",
"pufferfish",
"abacus",
"abaya",
"academic gown",
"accordion",
"acoustic guitar",
"aircraft carrier",
"airliner",
"airship",
"altar",
"ambulance",
"amphibious vehicle",
"analog clock",
"apiary",
"apron",
"trash can",
"assault rifle",
"backpack",
"bakery",
"balance beam",
"balloon",
"ballpoint pen",
"Band-Aid",
"banjo",
"baluster / handrail",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel",
"wheelbarrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"swimming cap",
"bath towel",
"bathtub",
"station wagon",
"lighthouse",
"beaker",
"military hat (bearskin or shako)",
"beer bottle",
"beer glass",
"bell tower",
"baby bib",
"tandem bicycle",
"bikini",
"ring binder",
"binoculars",
"birdhouse",
"boathouse",
"bobsleigh",
"bolo tie",
"poke bonnet",
"bookcase",
"bookstore",
"bottle cap",
"hunting bow",
"bow tie",
"brass memorial plaque",
"bra",
"breakwater",
"breastplate",
"broom",
"bucket",
"buckle",
"bulletproof vest",
"high-speed train",
"butcher shop",
"taxicab",
"cauldron",
"candle",
"cannon",
"canoe",
"can opener",
"cardigan",
"car mirror",
"carousel",
"tool kit",
"cardboard box / carton",
"car wheel",
"automated teller machine",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello",
"mobile phone",
"chain",
"chain-link fence",
"chain mail",
"chainsaw",
"storage chest",
"chiffonier",
"bell or wind chime",
"china cabinet",
"Christmas stocking",
"church",
"movie theater",
"cleaver",
"cliff dwelling",
"cloak",
"clogs",
"cocktail shaker",
"coffee mug",
"coffeemaker",
"spiral or coil",
"combination lock",
"computer keyboard",
"candy store",
"container ship",
"convertible",
"corkscrew",
"cornet",
"cowboy boot",
"cowboy hat",
"cradle",
"construction crane",
"crash helmet",
"crate",
"infant bed",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam",
"desk",
"desktop computer",
"rotary dial telephone",
"diaper",
"digital clock",
"digital watch",
"dining table",
"dishcloth",
"dishwasher",
"disc brake",
"dock",
"dog sled",
"dome",
"doormat",
"drilling rig",
"drum",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso machine",
"face powder",
"feather boa",
"filing cabinet",
"fireboat",
"fire truck",
"fire screen",
"flagpole",
"flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster bed",
"freight car",
"French horn",
"frying pan",
"fur coat",
"garbage truck",
"gas mask or respirator",
"gas pump",
"goblet",
"go-kart",
"golf ball",
"golf cart",
"gondola",
"gong",
"gown",
"grand piano",
"greenhouse",
"radiator grille",
"grocery store",
"guillotine",
"hair clip",
"hair spray",
"half-track",
"hammer",
"hamper",
"hair dryer",
"hand-held computer",
"handkerchief",
"hard disk drive",
"harmonica",
"harp",
"combine harvester",
"hatchet",
"holster",
"home theater",
"honeycomb",
"hook",
"hoop skirt",
"gymnastic horizontal bar",
"horse-drawn vehicle",
"hourglass",
"iPod",
"clothes iron",
"carved pumpkin",
"jeans",
"jeep",
"T-shirt",
"jigsaw puzzle",
"rickshaw",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat",
"ladle",
"lampshade",
"laptop computer",
"lawn mower",
"lens cap",
"letter opener",
"library",
"lifeboat",
"lighter",
"limousine",
"ocean liner",
"lipstick",
"slip-on shoe",
"lotion",
"music speaker",
"loupe magnifying glass",
"sawmill",
"magnetic compass",
"messenger bag",
"mailbox",
"tights",
"one-piece bathing suit",
"manhole cover",
"maraca",
"marimba",
"mask",
"matchstick",
"maypole",
"maze",
"measuring cup",
"medicine cabinet",
"megalith",
"microphone",
"microwave oven",
"military uniform",
"milk can",
"minibus",
"miniskirt",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home",
"ford model t",
"modem",
"monastery",
"monitor",
"moped",
"mortar and pestle",
"graduation cap",
"mosque",
"mosquito net",
"vespa",
"mountain bike",
"tent",
"computer mouse",
"mousetrap",
"moving van",
"muzzle",
"metal nail",
"neck brace",
"necklace",
"baby pacifier",
"notebook computer",
"obelisk",
"oboe",
"ocarina",
"odometer",
"oil filter",
"pipe organ",
"oscilloscope",
"overskirt",
"bullock cart",
"oxygen mask",
"product packet / packaging",
"paddle",
"paddle wheel",
"padlock",
"paintbrush",
"pajamas",
"palace",
"pan flute",
"paper towel",
"parachute",
"parallel bars",
"park bench",
"parking meter",
"railroad car",
"patio",
"payphone",
"pedestal",
"pencil case",
"pencil sharpener",
"perfume",
"Petri dish",
"photocopier",
"plectrum",
"Pickelhaube",
"picket fence",
"pickup truck",
"pier",
"piggy bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate ship",
"drink pitcher",
"block plane",
"planetarium",
"plastic bag",
"plate rack",
"farm plow",
"plunger",
"Polaroid camera",
"pole",
"police van",
"poncho",
"pool table",
"soda bottle",
"plant pot",
"potter's wheel",
"power drill",
"prayer rug",
"printer",
"prison",
"missile",
"projector",
"hockey puck",
"punching bag",
"purse",
"quill",
"quilt",
"race car",
"racket",
"radiator",
"radio",
"radio telescope",
"rain barrel",
"recreational vehicle",
"fishing casting reel",
"reflex camera",
"refrigerator",
"remote control",
"restaurant",
"revolver",
"rifle",
"rocking chair",
"rotisserie",
"eraser",
"rugby ball",
"ruler measuring stick",
"sneaker",
"safe",
"safety pin",
"salt shaker",
"sandal",
"sarong",
"saxophone",
"scabbard",
"weighing scale",
"school bus",
"schooner",
"scoreboard",
"CRT monitor",
"screw",
"screwdriver",
"seat belt",
"sewing machine",
"shield",
"shoe store",
"shoji screen / room divider",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"balaclava ski mask",
"sleeping bag",
"slide rule",
"sliding door",
"slot machine",
"snorkel",
"snowmobile",
"snowplow",
"soap dispenser",
"soccer ball",
"sock",
"solar thermal collector",
"sombrero",
"soup bowl",
"keyboard space bar",
"space heater",
"space shuttle",
"spatula",
"motorboat",
"spider web",
"spindle",
"sports car",
"spotlight",
"stage",
"steam locomotive",
"through arch bridge",
"steel drum",
"stethoscope",
"scarf",
"stone wall",
"stopwatch",
"stove",
"strainer",
"tram",
"stretcher",
"couch",
"stupa",
"submarine",
"suit",
"sundial",
"sunglasses",
"sunglasses",
"sunscreen",
"suspension bridge",
"mop",
"sweatshirt",
"swim trunks / shorts",
"swing",
"electrical switch",
"syringe",
"table lamp",
"tank",
"tape player",
"teapot",
"teddy bear",
"television",
"tennis ball",
"thatched roof",
"front curtain",
"thimble",
"threshing machine",
"throne",
"tile roof",
"toaster",
"tobacco shop",
"toilet seat",
"torch",
"totem pole",
"tow truck",
"toy store",
"tractor",
"semi-trailer truck",
"tray",
"trench coat",
"tricycle",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus",
"trombone",
"hot tub",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle",
"upright piano",
"vacuum cleaner",
"vase",
"vaulted or arched ceiling",
"velvet fabric",
"vending machine",
"vestment",
"viaduct",
"violin",
"volleyball",
"waffle iron",
"wall clock",
"wallet",
"wardrobe",
"military aircraft",
"sink",
"washing machine",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"hair wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"airplane wing",
"wok",
"wooden spoon",
"wool",
"split-rail fence",
"shipwreck",
"sailboat",
"yurt",
"website",
"comic book",
"crossword",
"traffic or street sign",
"traffic light",
"dust jacket",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot",
"trifle",
"ice cream",
"popsicle",
"baguette",
"bagel",
"pretzel",
"cheeseburger",
"hot dog",
"mashed potatoes",
"cabbage",
"broccoli",
"cauliflower",
"zucchini",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber",
"artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith apple",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple",
"banana",
"jackfruit",
"cherimoya (custard apple)",
"pomegranate",
"hay",
"carbonara",
"chocolate syrup",
"dough",
"meatloaf",
"pizza",
"pot pie",
"burrito",
"red wine",
"espresso",
"tea cup",
"eggnog",
"mountain",
"bubble",
"cliff",
"coral reef",
"geyser",
"lakeshore",
"promontory",
"sandbar",
"beach",
"valley",
"volcano",
"baseball player",
"bridegroom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper",
"corn",
"acorn",
"rose hip",
"horse chestnut seed",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn mushroom",
"earth star fungus",
"hen of the woods mushroom",
"bolete",
"corn cob",
"toilet paper",
]
openai_imagenet_template = [
lambda c: f"a bad photo of a {c}.",
lambda c: f"a photo of many {c}.",
lambda c: f"a sculpture of a {c}.",
lambda c: f"a photo of the hard to see {c}.",
lambda c: f"a low resolution photo of the {c}.",
lambda c: f"a rendering of a {c}.",
lambda c: f"graffiti of a {c}.",
lambda c: f"a bad photo of the {c}.",
lambda c: f"a cropped photo of the {c}.",
lambda c: f"a tattoo of a {c}.",
lambda c: f"the embroidered {c}.",
lambda c: f"a photo of a hard to see {c}.",
lambda c: f"a bright photo of a {c}.",
lambda c: f"a photo of a clean {c}.",
lambda c: f"a photo of a dirty {c}.",
lambda c: f"a dark photo of the {c}.",
lambda c: f"a drawing of a {c}.",
lambda c: f"a photo of my {c}.",
lambda c: f"the plastic {c}.",
lambda c: f"a photo of the cool {c}.",
lambda c: f"a close-up photo of a {c}.",
lambda c: f"a black and white photo of the {c}.",
lambda c: f"a painting of the {c}.",
lambda c: f"a painting of a {c}.",
lambda c: f"a pixelated photo of the {c}.",
lambda c: f"a sculpture of the {c}.",
lambda c: f"a bright photo of the {c}.",
lambda c: f"a cropped photo of a {c}.",
lambda c: f"a plastic {c}.",
lambda c: f"a photo of the dirty {c}.",
lambda c: f"a jpeg corrupted photo of a {c}.",
lambda c: f"a blurry photo of the {c}.",
lambda c: f"a photo of the {c}.",
lambda c: f"a good photo of the {c}.",
lambda c: f"a rendering of the {c}.",
lambda c: f"a {c} in a video game.",
lambda c: f"a photo of one {c}.",
lambda c: f"a doodle of a {c}.",
lambda c: f"a close-up photo of the {c}.",
lambda c: f"a photo of a {c}.",
lambda c: f"the origami {c}.",
lambda c: f"the {c} in a video game.",
lambda c: f"a sketch of a {c}.",
lambda c: f"a doodle of the {c}.",
lambda c: f"a origami {c}.",
lambda c: f"a low resolution photo of a {c}.",
lambda c: f"the toy {c}.",
lambda c: f"a rendition of the {c}.",
lambda c: f"a photo of the clean {c}.",
lambda c: f"a photo of a large {c}.",
lambda c: f"a rendition of a {c}.",
lambda c: f"a photo of a nice {c}.",
lambda c: f"a photo of a weird {c}.",
lambda c: f"a blurry photo of a {c}.",
lambda c: f"a cartoon {c}.",
lambda c: f"art of a {c}.",
lambda c: f"a sketch of the {c}.",
lambda c: f"a embroidered {c}.",
lambda c: f"a pixelated photo of a {c}.",
lambda c: f"itap of the {c}.",
lambda c: f"a jpeg corrupted photo of the {c}.",
lambda c: f"a good photo of a {c}.",
lambda c: f"a plushie {c}.",
lambda c: f"a photo of the nice {c}.",
lambda c: f"a photo of the small {c}.",
lambda c: f"a photo of the weird {c}.",
lambda c: f"the cartoon {c}.",
lambda c: f"art of the {c}.",
lambda c: f"a drawing of the {c}.",
lambda c: f"a photo of the large {c}.",
lambda c: f"a black and white photo of a {c}.",
lambda c: f"the plushie {c}.",
lambda c: f"a dark photo of a {c}.",
lambda c: f"itap of a {c}.",
lambda c: f"graffiti of the {c}.",
lambda c: f"a toy {c}.",
lambda c: f"itap of my {c}.",
lambda c: f"a photo of a cool {c}.",
lambda c: f"a photo of a small {c}.",
lambda c: f"a tattoo of the {c}.",
]
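def _example_prompts(classname: str = "goldfish"):
    # Hypothetical sketch, not part of the original file: zero-shot evaluation
    # expands each class name through every template; the resulting prompts are
    # embedded and averaged into a single classifier weight per class.
    return [template(classname) for template in openai_imagenet_template]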
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/flava/data/imagenet_zeroshot_data.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import hashlib
import os
from typing import Optional
import torch
from torch import nn
def store_model_weights(
model: nn.Module,
checkpoint_path: str,
checkpoint_key: Optional[str] = None,
strict=True,
):
"""
This method can be used to prepare weights files for new models. It receives as
input a model architecture and a checkpoint from the training script and produces
a file with the weights ready for release.
Code reference:
https://github.com/pytorch/vision/blob/main/references/classification/utils.py
Args:
model (nn.Module): The model on which the weights will be loaded for validation purposes.
checkpoint_path (str): The path of the checkpoint we will load.
checkpoint_key (str, optional): The key of the checkpoint where the model weights are stored.
For example, ``model`` is a common key used by many.
If ``None``, the checkpoint file is treated as model weights file. Default: ``None``.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``True``
Returns:
output_path (str): The location where the weights are saved.
"""
# Store the new model next to the checkpoint_path
checkpoint_path = os.path.abspath(checkpoint_path)
output_dir = os.path.dirname(checkpoint_path)
# Deep copy to avoid side-effects on the model object.
model = copy.deepcopy(model)
checkpoint = torch.load(checkpoint_path, map_location="cpu")
# Load the weights to the model to validate that everything works
# and remove unnecessary weights (such as auxiliaries, etc)
# If no checkpoint_key is provided, the checkpoint_path is treated
# as the model weights file
if checkpoint_key is None:
model.load_state_dict(checkpoint, strict=strict)
else:
if checkpoint_key == "model_ema":
del checkpoint[checkpoint_key]["n_averaged"]
nn.modules.utils.consume_prefix_in_state_dict_if_present(
checkpoint[checkpoint_key], "module."
)
model.load_state_dict(checkpoint[checkpoint_key], strict=strict)
tmp_path = os.path.join(output_dir, str(model.__hash__()))
torch.save(model.state_dict(), tmp_path)
sha256_hash = hashlib.sha256()
with open(tmp_path, "rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096), b""):
sha256_hash.update(byte_block)
hh = sha256_hash.hexdigest()
output_path = os.path.join(output_dir, "weights-" + str(hh[:8]) + ".pth")
os.replace(tmp_path, output_path)
return output_path
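# --- Illustrative usage sketch (added example, not part of the original file) ---
# One way store_model_weights might be called. The tiny linear model and the
# /tmp checkpoint path are placeholders invented for this example only.
def _demo_store_model_weights():
    example_model = nn.Linear(4, 2)
    checkpoint_path = "/tmp/example_checkpoint.pth"
    # Pretend this is a training checkpoint that stores the weights under the "model" key.
    torch.save({"model": example_model.state_dict()}, checkpoint_path)
    output_path = store_model_weights(
        nn.Linear(4, 2), checkpoint_path, checkpoint_key="model"
    )
    print(f"Release-ready weights written to {output_path}")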
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/utils/common.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/omnivore/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# This file is modified from:
# https://github.com/pytorch/vision/blob/main/references/classification/utils.py
import datetime
import errno
import os
import time
from collections import defaultdict, deque
import torch
import torch.distributed as dist
class SmoothedValue:
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
t = reduce_across_processes([self.count, self.total])
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
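# --- Illustrative usage sketch (added example, not part of the original file) ---
# Demonstrates how SmoothedValue keeps a windowed median/average; the loss
# values below are made up for the example.
def _demo_smoothed_value():
    sv = SmoothedValue(window_size=3, fmt="{median:.2f} ({global_avg:.2f})")
    for loss in [1.0, 0.8, 0.6, 0.4]:
        sv.update(loss)
    print(sv)  # median over the last 3 values, global average over all 4
    print(sv.max, sv.value)  # max over the current window (0.8) and the latest value (0.4)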
class MetricLogger:
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{attr}'"
)
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(f"{name}: {str(meter)}")
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
]
)
one_mb = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / one_mb,
)
)
else:
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(f"{header} Total time: {total_time_str}")
class ExponentialMovingAverage(torch.optim.swa_utils.AveragedModel):
"""Maintains moving averages of model parameters using an exponential decay.
``ema_avg = decay * avg_model_param + (1 - decay) * model_param``
`torch.optim.swa_utils.AveragedModel <https://pytorch.org/docs/stable/optim.html#custom-averaging-strategies>`_
is used to compute the EMA.
"""
def __init__(self, model, decay, device="cpu"):
def ema_avg(avg_model_param, model_param, num_averaged):
return decay * avg_model_param + (1 - decay) * model_param
super().__init__(model, device, ema_avg, use_buffers=True)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.inference_mode():
maxk = max(topk)
batch_size = target.size(0)
if target.ndim == 2:
target = target.max(dim=1)[1]
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
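# --- Illustrative usage sketch (added example, not part of the original file) ---
# Checks accuracy() on a tiny hand-crafted batch where top-1 is right for one
# of the two samples and top-2 is right for both.
def _demo_accuracy():
    logits = torch.tensor([[2.0, 0.1, 0.1], [0.1, 0.2, 2.0]])
    target = torch.tensor([0, 1])
    acc1, acc2 = accuracy(logits, target, topk=(1, 2))
    print(acc1.item(), acc2.item())  # 50.0 and 100.0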
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
elif hasattr(args, "rank"):
pass
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def reduce_across_processes(val, op=dist.ReduceOp.SUM):
if not is_dist_avail_and_initialized():
# nothing to sync, but we still convert to tensor for consistency with the distributed case.
return torch.tensor(val)
t = torch.tensor(val, device="cuda")
dist.barrier()
dist.all_reduce(t, op=op)
return t
def set_weight_decay(
model: torch.nn.Module,
weight_decay: float,
):
params = []
def _add_params(module, prefix=""):
for name, p in module.named_parameters(recurse=False):
if not p.requires_grad:
continue
params.append(p)
for child_name, child_module in module.named_children():
child_prefix = f"{prefix}.{child_name}" if prefix != "" else child_name
_add_params(child_module, prefix=child_prefix)
_add_params(model)
param_groups = [{"params": params, "weight_decay": weight_decay}]
return param_groups
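# --- Illustrative usage sketch (added example, not part of the original file) ---
# Builds optimizer parameter groups for a toy model; the model and hyperparameters
# below are placeholders chosen only to exercise set_weight_decay.
def _demo_set_weight_decay():
    toy_model = torch.nn.Sequential(
        torch.nn.Linear(8, 4), torch.nn.ReLU(), torch.nn.Linear(4, 2)
    )
    param_groups = set_weight_decay(toy_model, weight_decay=0.05)
    optimizer = torch.optim.SGD(param_groups, lr=0.1, momentum=0.9)
    print(len(param_groups[0]["params"]), "trainable tensors")  # 4 weight/bias tensors
    return optimizer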
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/omnivore/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Based on https://github.com/pytorch/vision/blob/main/references/classification/train.py
import argparse
import datetime
import logging
import os
import time
import omnivore.data.data_builder as data_builder
import torch
import torch.utils.data
import torchmultimodal.models.omnivore as omnivore
from omnivore import utils
from torch import nn
logger = None
def _chunk_forward_backward(
model,
image,
target,
input_type,
chunk_start,
chunk_end,
realized_accum_iter,
criterion,
optimizer,
device,
args,
scaler=None,
):
chunk_image, chunk_target = image[chunk_start:chunk_end, ...].to(device), target[
chunk_start:chunk_end, ...
].to(device)
with torch.cuda.amp.autocast(enabled=scaler is not None):
chunk_output = model(chunk_image, input_type)
loss = criterion(chunk_output, chunk_target)
# Normalize the loss
loss /= realized_accum_iter
if scaler is not None:
scaler.scale(loss).backward()
if args.clip_grad_norm is not None:
            # unscale the gradients of the optimizer's assigned params before gradient clipping
scaler.unscale_(optimizer)
nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
else:
loss.backward()
if args.clip_grad_norm is not None:
nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)
return loss, chunk_output
def train_one_epoch(
model,
criterion,
optimizer,
data_loader,
device,
epoch,
args,
model_ema=None,
scaler=None,
):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}"))
metric_logger.add_meter("img/s", utils.SmoothedValue(window_size=10, fmt="{value}"))
data_loader.set_epoch(epoch, is_distributed=args.distributed)
header = f"Epoch: [{epoch}]"
for i, (batch_data, input_type) in enumerate(
metric_logger.log_every(data_loader, args.print_freq, header)
):
image, target = batch_data[:2]
        # If input_type is video, we do "gradient accumulation" to reduce GPU memory usage.
        # Each forward-backward call is done on a smaller chunk, where chunk_size
        # is roughly batch_size divided by the number of accumulation iterations
accum_iter = 1
if input_type == "video":
accum_iter = args.video_grad_accum_iter
start_time = time.time()
batch_size = image.shape[0]
chunk_start = 0
        # We round up chunk_size and realized_accum_iter in case the batch size
        # is not divisible by accum_iter
chunk_size = (batch_size + accum_iter - 1) // accum_iter
realized_accum_iter = (batch_size + chunk_size - 1) // chunk_size
all_chunk_outputs = []
accum_loss = 0
for chunk_num in range(realized_accum_iter):
chunk_end = chunk_start + chunk_size
if args.distributed and chunk_num < realized_accum_iter - 1:
                # We don't synchronize gradients unless it is the last chunk in DDP mode
with model.no_sync():
loss, chunk_output = _chunk_forward_backward(
model,
image,
target,
input_type,
chunk_start,
chunk_end,
realized_accum_iter,
criterion,
optimizer,
device,
args,
scaler,
)
else:
loss, chunk_output = _chunk_forward_backward(
model,
image,
target,
input_type,
chunk_start,
chunk_end,
realized_accum_iter,
criterion,
optimizer,
device,
args,
scaler,
)
all_chunk_outputs.append(chunk_output)
accum_loss += loss.item()
chunk_start = chunk_end
# Weight update
if scaler is not None:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
optimizer.zero_grad()
if model_ema and i % args.model_ema_steps == 0:
model_ema.update_parameters(model)
if epoch < args.lr_warmup_epochs:
# Reset ema buffer to keep copying weights during warmup period
model_ema.n_averaged.fill_(0)
output = torch.cat(all_chunk_outputs, dim=0)
target = target.to(device)
acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
metric_logger.update(loss=accum_loss, lr=optimizer.param_groups[0]["lr"])
metric_logger.meters[f"{input_type}_acc1"].update(acc1.item(), n=batch_size)
metric_logger.meters[f"{input_type}_acc5"].update(acc5.item(), n=batch_size)
metric_logger.meters["img/s"].update(batch_size / (time.time() - start_time))
def evaluate(
model, criterion, data_loader, device, args, print_freq=100, log_suffix=""
):
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = f"Test: {log_suffix}"
for i, modality in enumerate(args.modalities):
if modality == "video":
# We aggregate predictions of all clips per video to get video-level accuracy
# For this, we prepare a tensor to contain the aggregation
num_videos = len(data_loader.data_loaders[i].dataset.samples)
num_video_classes = len(data_loader.data_loaders[i].dataset.classes)
agg_preds = torch.zeros(
(num_videos, num_video_classes), dtype=torch.float32, device=device
)
agg_targets = torch.zeros((num_videos), dtype=torch.int32, device=device)
num_processed_samples = 0
with torch.inference_mode():
for batch_data, input_type in metric_logger.log_every(
data_loader, print_freq, header
):
image, target = batch_data[:2]
if input_type == "video":
video_idx = batch_data[2]
# We do the evaluation in chunks to reduce memory usage for video
accum_iter = 1
if input_type == "video":
accum_iter = args.video_grad_accum_iter
batch_size = image.shape[0]
chunk_start = 0
chunk_size = (batch_size + accum_iter - 1) // accum_iter
realized_accum_iter = (batch_size + chunk_size - 1) // chunk_size
accum_loss = 0
all_chunk_outputs = []
for chunk_num in range(realized_accum_iter):
chunk_end = chunk_start + chunk_size
chunk_image = image[chunk_start:chunk_end, ...].to(
device, non_blocking=True
)
chunk_target = target[chunk_start:chunk_end, ...].to(
device, non_blocking=True
)
chunk_output = model(chunk_image, input_type)
loss = criterion(chunk_output, chunk_target)
accum_loss += loss.item()
all_chunk_outputs.append(chunk_output)
chunk_start = chunk_end
output = torch.cat(all_chunk_outputs, dim=0)
target = target.to(device, non_blocking=True)
if input_type == "video":
# Aggregate the prediction softmax and label for video-level accuracy
preds = torch.softmax(output, dim=1)
for batch_num in range(batch_size):
idx = video_idx[batch_num].item()
agg_preds[idx] += preds[batch_num].detach()
agg_targets[idx] = target[batch_num].detach().item()
acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
# FIXME need to take into account that the datasets
# could have been padded in distributed setup
metric_logger.update(loss=accum_loss)
metric_logger.meters[f"{input_type}_acc1"].update(acc1.item(), n=batch_size)
metric_logger.meters[f"{input_type}_acc5"].update(acc5.item(), n=batch_size)
num_processed_samples += batch_size
# gather the stats from all processes
metric_logger.synchronize_between_processes()
for modality in args.modalities:
try:
acc1 = getattr(metric_logger, f"{modality}_acc1").global_avg
acc5 = getattr(metric_logger, f"{modality}_acc5").global_avg
if modality == "video":
                # Reduce agg_preds and agg_targets across all GPUs and show the result
agg_preds = utils.reduce_across_processes(agg_preds)
agg_targets = utils.reduce_across_processes(
agg_targets, op=torch.distributed.ReduceOp.MAX
)
agg_acc1, agg_acc5 = utils.accuracy(agg_preds, agg_targets, topk=(1, 5))
logger.info(f"{header} Clip Acc@1 {acc1:.3f} Clip Acc@5 {acc5:.3f}")
logger.info(
f"{header} Video Acc@1 {agg_acc1:.3f} Video Acc@5 {agg_acc5:.3f}"
)
else:
logger.info(
f"{header} {modality} Acc@1 {acc1:.3f} {modality} Acc@5 {acc5:.3f}"
)
except Exception as e:
# Handle edge case of modality with sampling_factor 0
logger.warning(str(e))
def main(args):
log_numeric_level = getattr(logging, args.log_level.upper(), None)
if not isinstance(log_numeric_level, int):
        raise ValueError(f"Invalid log level: {args.log_level}")
log_format = "[%(asctime)s] %(levelname)s - %(message)s"
logging.basicConfig(format=log_format, level=log_numeric_level)
global logger
logger = logging.getLogger(__name__)
if args.output_dir:
utils.mkdir(args.output_dir)
utils.init_distributed_mode(args)
logger.info(args)
device = torch.device(args.device)
if args.use_deterministic_algorithms:
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
else:
torch.backends.cudnn.benchmark = True
logger.info(f"Creating model: {args.model}")
model = getattr(omnivore, args.model)(pretrained=args.pretrained)
model.to(device)
if args.distributed and args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
criterion = nn.CrossEntropyLoss(label_smoothing=args.label_smoothing)
parameters = utils.set_weight_decay(
model,
args.weight_decay,
)
if args.opt in ["sgd", "sgd_nesterov"]:
optimizer = torch.optim.SGD(
parameters,
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=(args.opt == "sgd_nesterov"),
)
elif args.opt == "rmsprop":
optimizer = torch.optim.RMSprop(
parameters,
lr=args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
eps=0.0316,
alpha=0.9,
)
elif args.opt == "adamw":
optimizer = torch.optim.AdamW(
parameters, lr=args.lr, weight_decay=args.weight_decay
)
scaler = torch.cuda.amp.GradScaler() if args.amp else None
if args.lr_scheduler == "steplr":
main_lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma
)
elif args.lr_scheduler == "cosineannealinglr":
main_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=args.epochs - args.lr_warmup_epochs, eta_min=args.lr_min
)
elif args.lr_scheduler == "exponentiallr":
main_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer, gamma=args.lr_gamma
)
if args.lr_warmup_epochs > 0:
if args.lr_warmup_method == "linear":
warmup_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
optimizer,
start_factor=args.lr_warmup_decay,
total_iters=args.lr_warmup_epochs,
)
elif args.lr_warmup_method == "constant":
warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
optimizer,
factor=args.lr_warmup_decay,
total_iters=args.lr_warmup_epochs,
)
lr_scheduler = torch.optim.lr_scheduler.SequentialLR(
optimizer,
schedulers=[warmup_lr_scheduler, main_lr_scheduler],
milestones=[args.lr_warmup_epochs],
)
else:
lr_scheduler = main_lr_scheduler
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu], find_unused_parameters=True
)
model_without_ddp = model.module
model_ema = None
if args.model_ema:
# Decay adjustment that aims to keep the decay independent from other hyper-parameters originally proposed at:
# https://github.com/facebookresearch/pycls/blob/f8cd9627/pycls/core/net.py#L123
#
# total_ema_updates = (Dataset_size / n_GPUs) * epochs / (batch_size_per_gpu * EMA_steps)
        # We consider constant = Dataset_size for a given dataset/setup and omit it. Thus:
# adjust = 1 / total_ema_updates ~= n_GPUs * batch_size_per_gpu * EMA_steps / epochs
adjust = args.world_size * args.batch_size * args.model_ema_steps / args.epochs
alpha = 1.0 - args.model_ema_decay
alpha = min(1.0, alpha * adjust)
model_ema = utils.ExponentialMovingAverage(
model_without_ddp, device=device, decay=1.0 - alpha
)
start_epoch = 0
if args.resume:
logger.info(f"Resuming from checkpoint: {args.resume}")
checkpoint = torch.load(args.resume, map_location="cpu")
model_without_ddp.load_state_dict(checkpoint["model"])
if not args.test_only:
optimizer.load_state_dict(checkpoint["optimizer"])
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
start_epoch = checkpoint["epoch"] + 1
if model_ema:
model_ema.load_state_dict(checkpoint["model_ema"])
if scaler:
scaler.load_state_dict(checkpoint["scaler"])
val_data_loader = data_builder.get_omnivore_data_loader(mode="val", args=args)
if args.test_only:
# We disable the cudnn benchmarking because it can noticeably affect the accuracy
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
if model_ema:
evaluate(
model_ema,
criterion,
val_data_loader,
device=device,
args=args,
log_suffix="EMA",
)
else:
evaluate(model, criterion, val_data_loader, device=device, args=args)
return
logger.info("Start training")
train_data_loader = data_builder.get_omnivore_data_loader(mode="train", args=args)
start_time = time.time()
for epoch in range(start_epoch, args.epochs):
train_one_epoch(
model,
criterion,
optimizer,
train_data_loader,
device,
epoch,
args,
model_ema,
scaler,
)
lr_scheduler.step()
if epoch % args.eval_every_num_epoch == args.eval_every_num_epoch - 1:
evaluate(model, criterion, val_data_loader, device=device, args=args)
if model_ema:
evaluate(
model_ema,
criterion,
val_data_loader,
device=device,
args=args,
log_suffix="EMA",
)
if args.output_dir:
checkpoint = {
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"args": args,
}
if model_ema:
checkpoint["model_ema"] = model_ema.state_dict()
if scaler:
checkpoint["scaler"] = scaler.state_dict()
if (
epoch % args.save_checkpoint_every_num_epoch
== args.save_checkpoint_every_num_epoch - 1
):
utils.save_on_master(
checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth")
)
utils.save_on_master(
checkpoint, os.path.join(args.output_dir, "checkpoint.pth")
)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info(f"Training time {total_time_str}")
def get_args_parser(add_help=True):
parser = argparse.ArgumentParser(
description="Torchmultimodal Omnivore Training", add_help=add_help
)
parser.add_argument(
"--device",
default="cuda",
type=str,
help="device (Use cuda or cpu Default: cuda)",
)
parser.add_argument(
"--model",
default="omnivore_swin_t",
type=str,
help="Model name. Default: 'omnivore_swin_t'",
)
parser.add_argument(
"-b",
"--batch-size",
default=64,
type=int,
help="images per gpu, the total batch size is $NGPU x batch_size (default: 64)",
)
parser.add_argument(
"--epochs",
default=500,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"-j",
"--workers",
default=16,
type=int,
metavar="N",
help="number of training data loading workers (default: 16)",
)
parser.add_argument(
"--opt",
default="adamw",
choices=["sgd", "sgd_nesterov", "rmsprop", "adamw"],
type=str,
help="optimizer",
)
parser.add_argument("--lr", default=0.002, type=float, help="initial learning rate")
parser.add_argument(
"--momentum", default=0.9, type=float, metavar="M", help="momentum"
)
parser.add_argument(
"--wd",
"--weight-decay",
default=0.05,
type=float,
metavar="W",
help="weight decay (default: 0.05)",
dest="weight_decay",
)
parser.add_argument(
"--label-smoothing",
default=0.0,
type=float,
help="label smoothing (default: 0.0)",
dest="label_smoothing",
)
parser.add_argument(
"--mixup-alpha", default=0.0, type=float, help="mixup alpha (default: 0.0)"
)
parser.add_argument(
"--cutmix-alpha", default=0.0, type=float, help="cutmix alpha (default: 0.0)"
)
parser.add_argument(
"--lr-scheduler",
default="cosineannealinglr",
choices=["steplr", "cosineannealinglr", "exponentiallr"],
type=str,
help="the lr scheduler (default: cosineannealinglr)",
)
parser.add_argument(
"--lr-warmup-epochs",
default=0,
type=int,
help="the number of epochs to warmup (default: 0)",
)
parser.add_argument(
"--lr-warmup-method",
default="linear",
choices=["linear", "constant"],
type=str,
help="the warmup method (default: linear)",
)
parser.add_argument(
"--lr-warmup-decay", default=0.01, type=float, help="the decay for lr"
)
parser.add_argument(
"--lr-step-size",
default=30,
type=int,
help="decrease lr every step-size epochs",
)
parser.add_argument(
"--lr-gamma",
default=0.1,
type=float,
help="decrease lr by a factor of lr-gamma",
)
parser.add_argument(
"--lr-min",
default=0.0,
type=float,
help="minimum lr of lr schedule (default: 0.0)",
)
parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
parser.add_argument(
"--output-dir", default=".", type=str, help="path to save outputs"
)
parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
parser.add_argument(
"--sync-bn",
dest="sync_bn",
help="Use sync batch norm",
action="store_true",
)
parser.add_argument(
"--test-only",
dest="test_only",
help="Only test the model",
action="store_true",
)
parser.add_argument(
"--random-erase",
default=0.0,
type=float,
help="random erasing probability (default: 0.0)",
)
# Mixed precision training parameters
parser.add_argument(
"--amp",
action="store_true",
help="Use torch.cuda.amp for mixed precision training",
)
# distributed training parameters
parser.add_argument(
"--world-size", default=1, type=int, help="number of distributed processes"
)
parser.add_argument(
"--dist-url",
default="env://",
type=str,
help="url used to set up distributed training",
)
parser.add_argument(
"--model-ema",
action="store_true",
help="enable tracking Exponential Moving Average of model parameters",
)
parser.add_argument(
"--model-ema-steps",
type=int,
default=1,
help="the number of iterations that controls how often to update the EMA model (default: 1)",
)
parser.add_argument(
"--model-ema-decay",
type=float,
default=0.9999,
help="decay factor for Exponential Moving Average of model parameters (default: 0.9999)",
)
parser.add_argument(
"--use-deterministic-algorithms",
action="store_true",
help="Forces the use of deterministic algorithms only.",
)
parser.add_argument(
"--val-resize-size",
default=256,
type=int,
help="the resize size used for validation (default: 256)",
)
parser.add_argument(
"--val-crop-size",
default=224,
type=int,
help="the central crop size used for validation (default: 224)",
)
parser.add_argument(
"--train-crop-size",
default=224,
type=int,
help="the random crop size used for training (default: 224)",
)
parser.add_argument(
"--clip-grad-norm",
default=None,
type=float,
help="the maximum gradient norm (default None)",
)
parser.add_argument(
"--weights", default=None, type=str, help="the weights enum name to load"
)
parser.add_argument(
"--train-resize-size",
default=256,
type=int,
help="the resize size used for training (default: 256)",
)
parser.add_argument(
"--imagenet-data-path", type=str, help="Root directory path of imagenet dataset"
)
parser.add_argument(
"--kinetics-data-path", type=str, help="Root directory path of kinetics dataset"
)
parser.add_argument(
"--sunrgbd-data-path", type=str, help="Root directory path of sunrgbd dataset"
)
parser.add_argument(
"--cache-video-dataset",
dest="cache_video_dataset",
help="Cache the video datasets for quicker initialization. It also serializes the transforms",
action="store_true",
)
parser.add_argument(
"--train-clips-per-video",
default=1,
type=int,
help="maximum number of clips per video to consider during training",
)
parser.add_argument(
"--val-clips-per-video",
default=4,
type=int,
help="maximum number of clips per video to consider during validation",
)
parser.add_argument(
"--kinetics-dataset-workers",
default=24,
type=int,
        help="number of workers to build kinetics dataset (default: 24)",
)
parser.add_argument(
"--extra-video-dataloader-workers",
default=8,
type=int,
help="number of additional video data loader workers (default=8)",
)
parser.add_argument(
"--eval-every-num-epoch",
default=1,
type=int,
        help="Number of epochs between evaluations on the validation dataset",
)
parser.add_argument(
"--modalities",
default=["image", "video", "rgbd"],
type=str,
nargs="+",
help="Modalities that will be used in training",
)
parser.add_argument(
"--val-data-sampling-factor",
default=[1.0, 1.0, 1.0],
type=float,
nargs="+",
help="Sampling factor for validation data for each modality",
)
parser.add_argument(
"--train-data-sampling-factor",
default=[1.0, 1.0, 10.0],
type=float,
nargs="+",
        help="Sampling factor for training data for each modality",
)
parser.add_argument(
"--loader-pin-memory",
help="Pin_memory parameter in data_loader",
action="store_true",
)
parser.add_argument(
"--color-jitter-factor",
default=[0.1, 0.1, 0.1, 0.1],
type=float,
nargs=4,
help="Color jitter factor in brightness, contrast, saturation, and hue",
)
parser.add_argument(
"--video-grad-accum-iter",
type=int,
default=32,
help="Number of gradient accumulation iteration to reduce batch size for video",
)
parser.add_argument(
"--loader-drop-last",
action="store_true",
help="Drop last parameter in DataLoader",
)
parser.add_argument(
"--val-num-worker-ratio",
default=0.5,
type=float,
        help="Ratio of evaluation to training data loader worker counts",
)
parser.add_argument("--log-level", default="INFO", type=str, help="Log level")
parser.add_argument(
"--pretrained", action="store_true", help="Start model with pretrained weight"
)
parser.add_argument(
"--save-checkpoint-every-num-epoch",
default=1,
type=int,
        help="Save a checkpoint every specified number of epochs",
)
return parser
if __name__ == "__main__":
args = get_args_parser().parse_args()
main(args)
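# --- Illustrative launch sketch (added example, not part of the original file) ---
# One plausible single-node invocation using torchrun; the dataset paths are
# placeholders and the flags shown are only a subset of get_args_parser().
#
#   torchrun --nproc_per_node=8 train.py \
#       --imagenet-data-path /data/imagenet \
#       --kinetics-data-path /data/kinetics400 \
#       --sunrgbd-data-path /data/sunrgbd \
#       --modalities image video rgbd \
#       --batch-size 64 --epochs 500 --opt adamw --lr 0.002 \
#       --lr-scheduler cosineannealinglr --lr-warmup-epochs 5 --amp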
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/omnivore/train.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Modified from https://github.com/pytorch/vision/blob/main/references/classification/transforms.py
import math
from typing import Tuple
import numpy as np
import torch
import torchvision.transforms as T
from torch import Tensor
class RandomMixup(torch.nn.Module):
"""Randomly apply Mixup to the provided batch and targets.
The class implements the data augmentations as described in the paper
`"mixup: Beyond Empirical Risk Minimization" <https://arxiv.org/abs/1710.09412>`_.
Args:
num_classes (int): number of classes used for one-hot encoding.
p (float): probability of the batch being transformed. Default value is 0.5.
alpha (float): hyperparameter of the Beta distribution used for mixup.
Default value is 1.0.
inplace (bool): boolean to make this transform inplace. Default set to False.
"""
def __init__(
self,
num_classes: int,
p: float = 0.5,
alpha: float = 1.0,
inplace: bool = False,
) -> None:
super().__init__()
if num_classes < 1:
raise ValueError(
f"Please provide a valid positive value for the num_classes. Got num_classes={num_classes}"
)
if alpha <= 0:
raise ValueError("Alpha param can't be zero.")
self.num_classes = num_classes
self.p = p
self.alpha = alpha
self.inplace = inplace
def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
"""
Args:
batch (Tensor): Float tensor of size (..., H, W)
target (Tensor): Integer tensor of size (B, )
Returns:
            Tuple[Tensor, Tensor]: Randomly transformed batch and target.
"""
if batch.ndim < 4:
raise ValueError(f"Batch ndim should be 4. Got {batch.ndim}")
if target.ndim != 1:
raise ValueError(f"Target ndim should be 1. Got {target.ndim}")
if not batch.is_floating_point():
raise TypeError(f"Batch dtype should be a float tensor. Got {batch.dtype}.")
if target.dtype != torch.int64:
raise TypeError(f"Target dtype should be torch.int64. Got {target.dtype}")
if not self.inplace:
batch = batch.clone()
target = target.clone()
if target.ndim == 1:
target = torch.nn.functional.one_hot(
target, num_classes=self.num_classes
).to(dtype=batch.dtype)
if torch.rand(1).item() >= self.p:
return batch, target
# It's faster to roll the batch by one instead of shuffling it to create image pairs
batch_rolled = batch.roll(1, 0)
target_rolled = target.roll(1, 0)
        # Implemented as in the mixup paper, page 3.
lambda_param = float(
torch._sample_dirichlet(torch.tensor([self.alpha, self.alpha]))[0]
)
batch_rolled.mul_(1.0 - lambda_param)
batch.mul_(lambda_param).add_(batch_rolled)
target_rolled.mul_(1.0 - lambda_param)
target.mul_(lambda_param).add_(target_rolled)
return batch, target
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}("
f"num_classes={self.num_classes}"
f", p={self.p}"
f", alpha={self.alpha}"
f", inplace={self.inplace}"
f")"
)
return s
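# --- Illustrative usage sketch (added example, not part of the original file) ---
# Applies RandomMixup to a random image batch; the shapes and class count are
# arbitrary values chosen for the example.
def _demo_random_mixup():
    mixup = RandomMixup(num_classes=10, p=1.0, alpha=0.2)
    images = torch.rand(4, 3, 32, 32)
    labels = torch.randint(0, 10, (4,))
    mixed_images, soft_labels = mixup(images, labels)
    print(mixed_images.shape, soft_labels.shape)  # (4, 3, 32, 32) and (4, 10)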
class RandomCutmix(torch.nn.Module):
"""Randomly apply Cutmix to the provided batch and targets.
The class implements the data augmentations as described in the paper
`"CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features"
<https://arxiv.org/abs/1905.04899>`_.
Args:
num_classes (int): number of classes used for one-hot encoding.
p (float): probability of the batch being transformed. Default value is 0.5.
alpha (float): hyperparameter of the Beta distribution used for cutmix.
Default value is 1.0.
inplace (bool): boolean to make this transform inplace. Default set to False.
"""
def __init__(
self,
num_classes: int,
p: float = 0.5,
alpha: float = 1.0,
inplace: bool = False,
) -> None:
super().__init__()
if num_classes < 1:
raise ValueError(
"Please provide a valid positive value for the num_classes."
)
if alpha <= 0:
raise ValueError("Alpha param can't be zero.")
self.num_classes = num_classes
self.p = p
self.alpha = alpha
self.inplace = inplace
def forward(self, batch: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
"""
Args:
batch (Tensor): Float tensor of size (..., H, W)
target (Tensor): Integer tensor of size (B, )
Returns:
            Tuple[Tensor, Tensor]: Randomly transformed batch and target.
"""
if batch.ndim < 4:
raise ValueError(f"Batch ndim should be 4. Got {batch.ndim}")
if target.ndim != 1:
raise ValueError(f"Target ndim should be 1. Got {target.ndim}")
if not batch.is_floating_point():
raise TypeError(f"Batch dtype should be a float tensor. Got {batch.dtype}.")
if target.dtype != torch.int64:
raise TypeError(f"Target dtype should be torch.int64. Got {target.dtype}")
if not self.inplace:
batch = batch.clone()
target = target.clone()
if target.ndim == 1:
target = torch.nn.functional.one_hot(
target, num_classes=self.num_classes
).to(dtype=batch.dtype)
if torch.rand(1).item() >= self.p:
return batch, target
# It's faster to roll the batch by one instead of shuffling it to create image pairs
batch_rolled = batch.roll(1, 0)
target_rolled = target.roll(1, 0)
        # Implemented as in the cutmix paper, page 12 (with minor corrections on typos).
lambda_param = float(
torch._sample_dirichlet(torch.tensor([self.alpha, self.alpha]))[0]
)
h, w = batch.shape[-2:]
r_x = torch.randint(w, (1,))
r_y = torch.randint(h, (1,))
r = 0.5 * math.sqrt(1.0 - lambda_param)
r_w_half = int(r * w)
r_h_half = int(r * h)
x1 = int(torch.clamp(r_x - r_w_half, min=0))
y1 = int(torch.clamp(r_y - r_h_half, min=0))
x2 = int(torch.clamp(r_x + r_w_half, max=w))
y2 = int(torch.clamp(r_y + r_h_half, max=h))
batch[..., y1:y2, x1:x2] = batch_rolled[..., y1:y2, x1:x2]
lambda_param = float(1.0 - (x2 - x1) * (y2 - y1) / (w * h))
target_rolled.mul_(1.0 - lambda_param)
target.mul_(lambda_param).add_(target_rolled)
return batch, target
def __repr__(self) -> str:
s = (
f"{self.__class__.__name__}("
f"num_classes={self.num_classes}"
f", p={self.p}"
f", alpha={self.alpha}"
f", inplace={self.inplace}"
f")"
)
return s
class Unsqueeze(torch.nn.Module):
def __init__(self, pos=0):
super().__init__()
self.pos = pos
def forward(self, x):
return x.unsqueeze(self.pos)
class ConvertTCHWtoCTHW(torch.nn.Module):
"""Convert tensor from (T, C, H, W) to (C, T, H, W)"""
def forward(self, vid: torch.Tensor) -> torch.Tensor:
return vid.permute(1, 0, 2, 3)
class ColorJitter3d(T.ColorJitter):
def forward(self, img):
assert isinstance(img, torch.Tensor)
img[:3, :, :] = super().forward(img[:3, :, :])
return img
# From original implementation
# https://www.internalfb.com/code/fbsource/[f1a98f41bcce7ee621f0248a6e0235a3e3dea628]/
# fbcode/deeplearning/projects/omnivore/vissl/data/ssl_transforms/depth_T.py?lines=13
class DropChannels(torch.nn.Module):
"""
Drops Channels with predefined probability values.
Pads the dropped channels with `pad_value`.
Channels can be tied using `tie_channels`
For example, for RGBD input, RGB can be tied by using `tie_channels=[0,1,2]`.
In this case, channels [0,1,2] will be dropped all at once or not at all.
Assumes input is of the form CxHxW or TxCxHxW
"""
def __init__(
self, channel_probs, fill_values, tie_channels=None, all_channel_drop=False
):
"""
channel_probs: List of probabilities
fill_values: List of values to fill the dropped channels with
tie_channels: List of indices. Tie dropping of certain channels.
all_channel_drop: Bool variable to prevent cases where all channels are dropped.
"""
super().__init__()
channel_probs = np.array(channel_probs, dtype=np.float32)
self.channel_probs = channel_probs
self.fill_values = fill_values
self.tie_channels = tie_channels
self.all_channel_drop = all_channel_drop
if tie_channels is not None:
tie_probs = [channel_probs[x] for x in tie_channels]
assert len(set(tie_probs)) == 1, "All tie_channel probs must be equal"
def forward(self, x):
assert isinstance(x, torch.Tensor)
if x.ndim == 3:
# CxHxW
num_channels = x.shape[0]
channel_index = 0
elif x.ndim == 4:
# TxCxHxW
num_channels = x.shape[1]
channel_index = 1
else:
raise ValueError(f"Unexpected number of dims {x.ndim}. Expected 3 or 4.")
assert num_channels == len(
self.channel_probs
), f"channel_probs is {len(self.channel_probs)} but got {num_channels} channels"
to_drop = [
np.random.random() < self.channel_probs[c] for c in range(num_channels)
]
if self.tie_channels is not None:
first_drop = to_drop[self.tie_channels[0]]
for idx in self.tie_channels[1:]:
to_drop[idx] = first_drop
if all(to_drop) and self.all_channel_drop is False:
# all channels will be dropped, prevent it
to_drop = [False for _ in range(num_channels)]
for c in range(num_channels):
if not to_drop[c]:
continue
if channel_index == 0:
x[c, ...] = self.fill_values[c]
elif channel_index == 1:
x[:, c, ...] = self.fill_values[c]
else:
raise NotImplementedError()
return x
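# --- Illustrative usage sketch (added example, not part of the original file) ---
# Drops the RGB channels of a random RGBD tensor as a tied group; the
# probabilities and fill values mirror DepthClassificationPresetTrain in presets.py.
def _demo_drop_channels():
    drop = DropChannels(
        channel_probs=[0.5, 0.5, 0.5, 0],
        fill_values=[0, 0, 0, 0],
        tie_channels=[0, 1, 2],
    )
    rgbd = torch.rand(4, 8, 8)
    out = drop(rgbd)
    print(out.shape)  # torch.Size([4, 8, 8]); RGB is either all kept or all zeroed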
# From original implementation:
# https://github.com/facebookresearch/omnivore/blob/main/omnivore/T.py#L16
class DepthNorm(torch.nn.Module):
"""
Normalize the depth channel: in an RGBD input of shape (4, H, W),
only the last channel is modified.
The depth channel is also clamped at 0.0. The Midas depth prediction
model outputs inverse depth maps - negative values correspond
to distances far away so can be clamped at 0.0
"""
def __init__(
self,
max_depth: float,
clamp_max_before_scale: bool = False,
min_depth: float = 0.01,
):
"""
Args:
max_depth (float): The max value of depth for the dataset
            clamp_max_before_scale (bool): Whether to clamp to max_depth before dividing by max_depth
            min_depth (float): Lower bound used to clamp the depth channel
"""
super().__init__()
        if max_depth <= 0.0:
raise ValueError("max_depth must be > 0; got %.2f" % max_depth)
self.max_depth = max_depth
self.clamp_max_before_scale = clamp_max_before_scale
self.min_depth = min_depth
def forward(self, image: torch.Tensor):
c, h, w = image.shape
if c != 4:
err_msg = (
f"This transform is for 4 channel RGBD input only; got {image.shape}"
)
raise ValueError(err_msg)
color_img = image[:3, ...] # (3, H, W)
depth_img = image[3:4, ...] # (1, H, W)
# Clamp to 0.0 to prevent negative depth values
depth_img = depth_img.clamp(min=self.min_depth)
# divide by max_depth
if self.clamp_max_before_scale:
depth_img = depth_img.clamp(max=self.max_depth)
depth_img /= self.max_depth
img = torch.cat([color_img, depth_img], dim=0)
return img
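# --- Illustrative usage sketch (added example, not part of the original file) ---
# Normalizes the depth channel of a random RGBD tensor; max_depth=75 mirrors the
# value used by the depth presets, the input values are random.
def _demo_depth_norm():
    depth_norm = DepthNorm(max_depth=75.0, clamp_max_before_scale=True)
    rgbd = torch.rand(4, 8, 8) * 100.0  # 4 channels: RGB + depth
    out = depth_norm(rgbd)
    print(out.shape, float(out[3].max()) <= 1.0)  # torch.Size([4, 8, 8]) True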
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/omnivore/data/transforms.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import omnivore.data.transforms as CT # custom transforms
import torch
import torchvision.transforms as T
from omnivore.data.rand_aug3d import RandAugment3d
from torchvision.transforms.functional import InterpolationMode
# Image presets
class ImageNetClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BICUBIC,
hflip_prob=0.5,
random_erase_prob=0.0,
color_jitter_factor=(0.1, 0.1, 0.1, 0.1),
):
transform_list = [T.RandomResizedCrop(crop_size, interpolation=interpolation)]
if hflip_prob > 0:
transform_list.append(T.RandomHorizontalFlip(hflip_prob))
transform_list.extend(
[
T.autoaugment.RandAugment(interpolation=interpolation),
T.ColorJitter(*color_jitter_factor),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
]
)
if random_erase_prob > 0:
transform_list.append(T.RandomErasing(p=random_erase_prob))
# For omnivore to make the image look like a video with C D H W layout
transform_list.append(CT.Unsqueeze(pos=1))
self.transforms = T.Compose(transform_list)
def __call__(self, img):
return self.transforms(img)
class ImageNetClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
interpolation=InterpolationMode.BICUBIC,
):
self.transforms = T.Compose(
[
T.Resize(resize_size, interpolation=interpolation),
T.CenterCrop(crop_size),
T.PILToTensor(),
T.ConvertImageDtype(torch.float),
T.Normalize(mean=mean, std=std),
# For omnivore to make the image look like a video with C D H W layout
CT.Unsqueeze(pos=1),
]
)
def __call__(self, img):
return self.transforms(img)
# Video presets
class VideoClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
resize_size,
mean=(0.43216, 0.394666, 0.37645),
std=(0.22803, 0.22145, 0.216989),
hflip_prob=0.5,
):
transform_list = [
T.ConvertImageDtype(torch.float32),
T.Resize(resize_size),
]
if hflip_prob > 0:
transform_list.append(T.RandomHorizontalFlip(hflip_prob))
transform_list.extend(
[
T.Normalize(mean=mean, std=std),
T.RandomCrop(crop_size),
CT.ConvertTCHWtoCTHW(),
]
)
self.transforms = T.Compose(transform_list)
def __call__(self, x):
return self.transforms(x)
class VideoClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size,
mean=(0.43216, 0.394666, 0.37645),
std=(0.22803, 0.22145, 0.216989),
):
self.transforms = T.Compose(
[
T.ConvertImageDtype(torch.float32),
T.Resize(resize_size),
T.Normalize(mean=mean, std=std),
T.CenterCrop(crop_size),
CT.ConvertTCHWtoCTHW(),
]
)
def __call__(self, x):
return self.transforms(x)
# Depth Presets
class DepthClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
max_depth=75,
mean=(0.485, 0.456, 0.406, 0.0418),
std=(0.229, 0.224, 0.225, 0.0295),
interpolation=InterpolationMode.BILINEAR,
hflip_prob=0.5,
random_erase_prob=0.0,
color_jitter_factor=(0.1, 0.1, 0.1, 0.1),
):
transform_list = [
CT.DepthNorm(max_depth=max_depth, clamp_max_before_scale=True),
T.RandomResizedCrop(crop_size, interpolation=interpolation),
]
if hflip_prob > 0:
transform_list.append(T.RandomHorizontalFlip(hflip_prob))
transform_list.extend(
[
RandAugment3d(interpolation=interpolation, num_ops=1),
CT.ColorJitter3d(*color_jitter_factor),
]
)
if random_erase_prob > 0:
transform_list.append(T.RandomErasing(p=random_erase_prob))
transform_list.append(T.Normalize(mean=mean, std=std))
transform_list.append(
CT.DropChannels(
channel_probs=[0.5, 0.5, 0.5, 0],
tie_channels=[0, 1, 2],
fill_values=[0, 0, 0, 0],
)
)
# For omnivore to make the rgbd look like video with C D H W layout
transform_list.append(CT.Unsqueeze(pos=1))
self.transforms = T.Compose(transform_list)
def __call__(self, img):
return self.transforms(img)
class DepthClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
max_depth=75,
mean=(0.485, 0.456, 0.406, 0.0418),
std=(0.229, 0.224, 0.225, 0.0295),
interpolation=InterpolationMode.BILINEAR,
):
self.transforms = T.Compose(
[
CT.DepthNorm(max_depth=max_depth, clamp_max_before_scale=True),
T.Resize(resize_size, interpolation=interpolation),
T.CenterCrop(crop_size),
T.Normalize(mean=mean, std=std),
# For omnivore to make the depth image look like video with C D H W layout
CT.Unsqueeze(pos=1),
]
)
def __call__(self, img):
return self.transforms(img)
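# --- Illustrative usage sketch (added example, not part of the original file) ---
# Runs the RGBD eval preset on a random 4-channel tensor; the sizes match the
# defaults used elsewhere in this example code.
def _demo_depth_preset_eval():
    preset = DepthClassificationPresetEval(crop_size=224, resize_size=256)
    rgbd = torch.rand(4, 300, 400) * 75.0
    out = preset(rgbd)
    print(out.shape)  # torch.Size([4, 1, 224, 224]) after the trailing Unsqueeze(pos=1)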
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/omnivore/data/presets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from pathlib import Path
import PIL
import scipy.io
import torch
import torchvision
import torchvision.transforms as T
from torchvision.datasets.vision import VisionDataset
class OmnivoreKinetics(torchvision.datasets.kinetics.Kinetics):
def __getitem__(self, idx):
video, audio, info, video_idx = self.video_clips.get_clip(idx)
label = self.samples[video_idx][1]
if self.transform is not None:
video = self.transform(video)
return video, label, video_idx
class OmnivoreSunRgbdDatasets(VisionDataset):
def __init__(self, root, transform=None, target_transform=None, split="train"):
super().__init__(root, transform=transform, target_transform=target_transform)
self._data_dir = Path(self.root) / "SUNRGBD"
self._meta_dir = Path(self.root) / "SUNRGBDtoolbox"
if not self._check_exists():
raise RuntimeError(
f"data_dir: {self._data_dir} or meta_dir: {self._meta_dir} not found"
)
# Get the param from https://github.com/facebookresearch/omnivore/issues/12
self.sensor_to_params = {
"kv1": {
"baseline": 0.075,
},
"kv1_b": {
"baseline": 0.075,
},
"kv2": {
"baseline": 0.075,
},
"realsense": {
"baseline": 0.095,
},
"xtion": {
"baseline": 0.095, # guessed based on length of 18cm for ASUS xtion v1
},
}
        # Omnivore only uses these 19 selected classes
self.classes = [
"bathroom",
"bedroom",
"classroom",
"computer_room",
"conference_room",
"corridor",
"dining_area",
"dining_room",
"discussion_area",
"furniture_store",
"home_office",
"kitchen",
"lab",
"lecture_theatre",
"library",
"living_room",
"office",
"rest_space",
"study_space",
]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
allsplit_filepath = self._meta_dir / "traintestSUNRGBD/allsplit.mat"
allsplit_mat = scipy.io.loadmat(allsplit_filepath)
        # The original filepaths in "allsplit.mat" carry a prefix from the author's machine that needs to be replaced
ori_prefix = "/n/fs/sun3d/data/SUNRGBD/"
if split == "train":
self.image_dirs = [
self._data_dir / x[0][len(ori_prefix) :]
for x in allsplit_mat["alltrain"][0]
]
elif split == "val":
self.image_dirs = [
self._data_dir / x[0][len(ori_prefix) :]
for x in allsplit_mat["alltest"][0]
]
# Filter to use only chosen 19 classes
self.image_dirs = [
x
for x in self.image_dirs
if self._get_sunrgbd_scene_class(x) in self.class_to_idx
]
self._to_tensor = T.ToTensor()
def _check_exists(self):
return self._data_dir.is_dir() and self._meta_dir.is_dir()
def __len__(self):
return len(self.image_dirs)
def _get_disparity_tensor(self, image_dir):
        # Using depth_bfx; one could also consider just using depth
image_dir = Path(image_dir)
depth_dir = image_dir / "depth_bfx"
intrinsics_file = image_dir / "intrinsics.txt"
depth_path = depth_dir / os.listdir(depth_dir)[0]
sensor_type = image_dir.relative_to(self._data_dir).parts[0]
baseline = self.sensor_to_params[sensor_type]["baseline"]
with open(intrinsics_file, "r") as fin:
lines = fin.readlines()
focal_length = float(lines[0].strip().split()[0])
img_depth = PIL.Image.open(depth_path)
tensor_depth = self._to_tensor(img_depth)
tensor_disparity = baseline * focal_length / (tensor_depth / 1000.0)
return tensor_disparity
def _read_sunrgbd_image(self, image_dir):
rgb_dir = os.path.join(image_dir, "image")
rgb_path = os.path.join(rgb_dir, os.listdir(rgb_dir)[0])
img_rgb = PIL.Image.open(rgb_path)
tensor_rgb = self._to_tensor(img_rgb)
tensor_d = self._get_disparity_tensor(image_dir)
tensor_rgbd = torch.cat((tensor_rgb, tensor_d), dim=0)
return tensor_rgbd
def _get_sunrgbd_scene_class(self, image_dir):
with open(os.path.join(image_dir, "scene.txt"), "r") as fin:
scene_class = fin.read().strip()
return scene_class
def __getitem__(self, idx):
        # Return a tuple of the RGBD image tensor (C==4, H, W) and the scene class index
image_dir = self.image_dirs[idx]
x_rgbd = self._read_sunrgbd_image(image_dir)
scene_class = self._get_sunrgbd_scene_class(image_dir)
scene_idx = self.class_to_idx[scene_class]
if self.transform:
x_rgbd = self.transform(x_rgbd)
if self.target_transform:
scene_idx = self.target_transform(scene_idx)
return x_rgbd, scene_idx
class ConcatDataLoaderIterator:
def __init__(self, _obj):
# Copy necessary data from _obj
self.data_loaders = _obj.data_loaders
self.output_keys = _obj.output_keys
self.max_total_steps = _obj.max_total_steps
self.epoch = _obj.epoch
# Construct iterators
self.step_counter = 0
self.iterators = [iter(dl) for dl in self.data_loaders]
self.indices = torch.cat(
[
torch.ones(_obj.iterator_lengths[i], dtype=torch.int32) * i
for i in range(_obj.num_data_loaders)
]
)
assert self.max_total_steps == len(self.indices)
if _obj.shuffle:
g = torch.Generator()
if self.epoch is not None:
# Have deterministic behaviour when epoch is set
g.manual_seed(_obj.seed + self.epoch)
shuffle_indices = torch.randperm(len(self.indices), generator=g)
self.indices = self.indices[shuffle_indices]
def __iter__(self):
return self
def __next__(self):
if self.step_counter >= self.max_total_steps:
raise StopIteration
idx = self.indices[self.step_counter]
output_key = self.output_keys[idx]
try:
batch = next(self.iterators[idx])
except StopIteration:
            # Cycle the data_loader back to the beginning. This can happen when repeat_factor > 1.
            # Note that in this case we reuse the same shuffling from that data_loader within an epoch
self.iterators[idx] = iter(self.data_loaders[idx])
batch = next(self.iterators[idx])
self.step_counter += 1
# Return batch and output_key
return batch, output_key
def __len__(self):
return self.max_total_steps
class ConcatDataLoader:
"""
    ConcatDataLoader groups multiple data loader objects.
    When iterating over this object, we sample a data loader and return its
    batch data together with the corresponding output_key.
    With repeat_factors, the user can upsample or downsample each data loader.
Args:
data_loaders: the iterable objects that will be grouped
        output_keys: List of keys used to identify each iterable's output.
            The list length should be the same as the number of data_loaders.
        repeat_factors: List of numbers that represent the upsampling / downsampling factor
            for the corresponding data_loaders. Should have the same length as data_loaders.
        shuffle: Boolean that determines whether we should shuffle the ordering of the
            data loaders (default: ``False``)
seed: the seed for randomness (default: ``42``)
"""
def __init__(
self, data_loaders, output_keys, repeat_factors, shuffle=False, seed=42
):
self.data_loaders = data_loaders
self.output_keys = output_keys
self.repeat_factors = repeat_factors
self.shuffle = shuffle
self.seed = seed
self.num_data_loaders = len(self.data_loaders)
assert self.num_data_loaders == len(output_keys)
assert self.num_data_loaders == len(repeat_factors)
# The iterator len is adjusted with repeat_factors
self.iterator_lengths = [
int(repeat_factors[i] * len(itb)) for i, itb in enumerate(self.data_loaders)
]
self.max_total_steps = sum(self.iterator_lengths)
self.epoch = None
def __len__(self):
return self.max_total_steps
def __iter__(self):
return ConcatDataLoaderIterator(self)
def set_epoch(self, epoch, is_distributed=False):
# Setting epoch will result in reproducible shuffling
self.epoch = epoch
if is_distributed:
# In distributed mode, we want to call set_epoch for the samplers
for data_loader in self.data_loaders:
data_loader.sampler.set_epoch(epoch)
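# --- Illustrative usage sketch (added example, not part of the original file) ---
# Groups two toy DataLoaders with ConcatDataLoader; the tensors, keys, and
# repeat factors are invented for the example.
def _demo_concat_data_loader():
    from torch.utils.data import DataLoader, TensorDataset

    image_like = TensorDataset(torch.rand(8, 3), torch.randint(0, 2, (8,)))
    depth_like = TensorDataset(torch.rand(4, 4), torch.randint(0, 2, (4,)))
    loaders = [
        DataLoader(image_like, batch_size=2),
        DataLoader(depth_like, batch_size=2),
    ]
    combined = ConcatDataLoader(
        loaders, output_keys=["image", "rgbd"], repeat_factors=[1.0, 2.0], shuffle=True
    )
    combined.set_epoch(0)  # makes the shuffling reproducible
    for (inputs, targets), key in combined:
        print(key, inputs.shape, targets.shape)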
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/omnivore/data/datasets.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/omnivore/data/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# This file, data_builder.py, consists of functions to build the dataset
# for training in torchmultimodal/examples/omnivore/train.py
# Since there are a lot of parameters, we allow passing args here
import datetime
import logging
import os
import time
import omnivore.utils as utils
import torch
import torchvision
import torchvision.datasets.samplers as video_samplers
from omnivore.data import datasets, presets, transforms
from torch.utils.data.dataloader import default_collate
from torchvision.transforms.functional import InterpolationMode
logger = logging.getLogger(__name__)
def get_video_sampler(dataset, mode, args):
# Get sampler for video dataset
if mode == "train":
sampler_class = video_samplers.RandomClipSampler
clips_per_video = args.train_clips_per_video
elif mode == "val":
sampler_class = video_samplers.UniformClipSampler
clips_per_video = args.val_clips_per_video
sampler = sampler_class(dataset.video_clips, clips_per_video)
if args.distributed:
sampler = video_samplers.DistributedSampler(sampler)
return sampler
def get_image_sampler(dataset, mode, args):
# Get sampler for image with rgb or rgbd channel dataset
if args.distributed:
if mode == "train":
shuffle = True
elif mode == "val":
shuffle = False
sampler = torch.utils.data.distributed.DistributedSampler(
dataset, shuffle=shuffle
)
else:
if mode == "train":
sampler = torch.utils.data.RandomSampler(dataset)
elif mode == "val":
sampler = torch.utils.data.SequentialSampler(dataset)
return sampler
def construct_data_loader(dataset, sampler, num_workers, mode, args, drop_last=False):
collate_fn = None
if mode == "train":
num_classes = len(dataset.classes)
mixup_transforms = []
if args.mixup_alpha > 0.0:
mixup_transforms.append(
transforms.RandomMixup(num_classes, p=1.0, alpha=args.mixup_alpha)
)
if args.cutmix_alpha > 0.0:
mixup_transforms.append(
transforms.RandomCutmix(num_classes, p=1.0, alpha=args.cutmix_alpha)
)
if mixup_transforms:
mixupcutmix = torchvision.transforms.RandomChoice(mixup_transforms)
            # Since not all datasets return tuples of the same length, we take the
# first two elements for mixupcutmix during training
collate_fn = lambda batch: mixupcutmix(
*(default_collate(batch)[:2])
) # noqa: E731
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
sampler=sampler,
num_workers=num_workers,
pin_memory=args.loader_pin_memory,
collate_fn=collate_fn,
drop_last=drop_last,
)
return data_loader
def _get_cache_path(filepath):
import hashlib
h = hashlib.sha1(filepath.encode()).hexdigest()
cache_path = os.path.join(
"~", ".torch", "torchmultimodal", "omnivore_kinetics", h[:10] + ".pt"
)
cache_path = os.path.expanduser(cache_path)
return cache_path
def get_kinetics_dataset(
kinetics_path,
split,
transform,
step_between_clips,
args,
frame_rate=16,
frames_per_clip=32,
):
data_dir = os.path.join(kinetics_path, split)
cache_path = _get_cache_path(data_dir)
logger.info(f"cache_path: {cache_path}")
if args.cache_video_dataset and os.path.exists(cache_path):
logger.info(f"Loading {split} dataset from {cache_path}")
dataset, _ = torch.load(cache_path)
dataset.transform = transform
else:
if args.distributed:
logger.info(
"It is recommended to pre-compute the dataset cache on a single-gpu first, it will be faster!"
)
logger.info("Building kinetics dataset")
dataset = datasets.OmnivoreKinetics(
kinetics_path,
num_classes="400",
extensions=("avi", "mp4"),
output_format="TCHW",
frames_per_clip=frames_per_clip,
frame_rate=frame_rate,
step_between_clips=step_between_clips,
split=split,
transform=transform,
num_workers=args.kinetics_dataset_workers,
)
if args.cache_video_dataset:
logger.info(f"Saving {split} dataset to {cache_path}")
utils.mkdir(os.path.dirname(cache_path))
utils.save_on_master((dataset, data_dir), cache_path)
return dataset
def get_imagenet_data_loader(mode, num_workers, args):
logger.info(f"Start getting {mode} imagenet data_loader")
# Get imagenet data
imagenet_path = args.imagenet_data_path
if mode == "train":
preset = presets.ImageNetClassificationPresetTrain(
crop_size=args.train_crop_size,
interpolation=InterpolationMode.BICUBIC,
random_erase_prob=args.random_erase,
color_jitter_factor=args.color_jitter_factor,
)
drop_last = args.loader_drop_last
elif mode == "val":
preset = presets.ImageNetClassificationPresetEval(
crop_size=args.val_crop_size, interpolation=InterpolationMode.BICUBIC
)
drop_last = False
dataset_dir = os.path.join(imagenet_path, mode)
dataset = torchvision.datasets.folder.ImageFolder(dataset_dir, preset)
sampler = get_image_sampler(dataset, mode, args)
data_loader = construct_data_loader(
dataset, sampler, num_workers, mode, args, drop_last=drop_last
)
logger.info(f"Finish getting {mode} imagenet data_loader")
return data_loader
def get_kinetics_data_loader(mode, num_workers, args):
logger.info(f"Start getting {mode} video data_loader")
# Get kinetics data
kinetics_path = args.kinetics_data_path
if mode == "train":
preset = presets.VideoClassificationPresetTrain(
crop_size=args.train_crop_size,
resize_size=args.train_resize_size,
)
drop_last = args.loader_drop_last
elif mode == "val":
preset = presets.VideoClassificationPresetEval(
crop_size=args.val_crop_size,
resize_size=args.val_resize_size,
)
drop_last = False
start_time = time.time()
logger.info(f"Start getting {mode} video dataset")
dataset = get_kinetics_dataset(
kinetics_path,
split=mode,
transform=preset,
step_between_clips=1,
args=args,
)
logger.info(f"Took {time.time() - start_time} seconds to get {mode} video dataset")
sampler = get_video_sampler(dataset, mode, args)
data_loader = construct_data_loader(
dataset, sampler, num_workers, mode, args, drop_last=drop_last
)
logger.info(f"Finish getting {mode} video data_loader")
return data_loader
def get_sunrgbd_data_loader(mode, num_workers, args):
logger.info(f"Start creating {mode} depth dataset")
# Get sunrgbd data
sunrgbd_path = args.sunrgbd_data_path
if mode == "train":
preset = presets.DepthClassificationPresetTrain(
crop_size=args.train_crop_size,
interpolation=InterpolationMode.BILINEAR,
random_erase_prob=args.random_erase,
max_depth=75.0,
mean=(0.485, 0.456, 0.406, 0.0418),
std=(0.229, 0.224, 0.225, 0.0295),
color_jitter_factor=args.color_jitter_factor,
)
drop_last = args.loader_drop_last
elif mode == "val":
preset = presets.DepthClassificationPresetEval(
crop_size=args.val_crop_size,
interpolation=InterpolationMode.BILINEAR,
max_depth=75.0,
mean=(0.485, 0.456, 0.406, 0.0418),
std=(0.229, 0.224, 0.225, 0.0295),
)
drop_last = False
dataset = datasets.OmnivoreSunRgbdDatasets(
root=sunrgbd_path, split=mode, transform=preset
)
sampler = get_image_sampler(dataset, mode, args)
data_loader = construct_data_loader(
dataset, sampler, num_workers, mode, args, drop_last=drop_last
)
logger.info(f"Finish getting {mode} depth dataset")
return data_loader
def get_omnivore_data_loader(mode, args):
modalities = args.modalities
data_loader_list = []
data_loader_builder_map = {
"image": get_imagenet_data_loader,
"video": get_kinetics_data_loader,
"rgbd": get_sunrgbd_data_loader,
}
if mode == "train":
data_sampling_factor = args.train_data_sampling_factor
shuffle = True
elif mode == "val":
data_sampling_factor = args.val_data_sampling_factor
shuffle = False
for i, modality in enumerate(modalities):
# Determine the number of workers
num_workers = args.workers
if modality == "video":
# Have extra workers for video data loader
num_workers += args.extra_video_dataloader_workers
if mode == "val":
# Adjust num val workers with args.val_num_worker_ratio
num_workers = max(int(num_workers * args.val_num_worker_ratio), 1)
        # A sampling factor of 0 means the modality produces no data, hence no workers are needed
if data_sampling_factor[i] == 0:
num_workers = 0
# Build data_loader
data_loader = data_loader_builder_map[modality](mode, num_workers, args)
data_loader_list.append(data_loader)
omnivore_data_loader = datasets.ConcatDataLoader(
data_loader_list,
modalities,
data_sampling_factor,
shuffle=shuffle,
)
return omnivore_data_loader
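# A minimal usage sketch of the sampler helpers above, assuming a hypothetical
# `args` namespace that only carries the single field they read here
# (`distributed`); it is not the real training-script argument object.
if __name__ == "__main__":
    from argparse import Namespace
    import torch
    demo_args = Namespace(distributed=False)  # hypothetical, illustration only
    demo_dataset = torch.utils.data.TensorDataset(torch.arange(8))
    train_sampler = get_image_sampler(demo_dataset, "train", demo_args)
    val_sampler = get_image_sampler(demo_dataset, "val", demo_args)
    # Non-distributed runs get random sampling for train, sequential for val.
    print(type(train_sampler).__name__, type(val_sampler).__name__)
    # Cache paths are derived from a sha1 hash of the dataset directory.
    print(_get_cache_path("/path/to/kinetics/train"))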
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/omnivore/data/data_builder.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import List, Optional
import torch
from torch import Tensor
from torchvision.transforms import autoaugment, functional as F, InterpolationMode
__all__ = ["RandAugment3d"]
def _apply_op(
img: Tensor,
op_name: str,
magnitude: float,
interpolation: InterpolationMode,
fill: Optional[List[float]],
):
# Modified from torchvision.transforms.autoaugment._apply_op
    # we assume the input img is a float tensor with values in the range [0, 1]
if op_name == "ShearX":
# magnitude should be arctan(magnitude)
# official autoaug: (1, level, 0, 0, 1, 0)
# https://github.com/tensorflow/models/blob/dd02069717128186b88afa8d857ce57d17957f03/research/autoaugment/augmentation_transforms.py#L290
# compared to
# torchvision: (1, tan(level), 0, 0, 1, 0)
# https://github.com/pytorch/vision/blob/0c2373d0bba3499e95776e7936e207d8a1676e65/torchvision/transforms/functional.py#L976
img = F.affine(
img,
angle=0.0,
translate=[0, 0],
scale=1.0,
shear=[math.degrees(math.atan(magnitude)), 0.0],
interpolation=interpolation,
fill=fill,
center=[0, 0],
)
elif op_name == "ShearY":
# magnitude should be arctan(magnitude)
# See above
img = F.affine(
img,
angle=0.0,
translate=[0, 0],
scale=1.0,
shear=[0.0, math.degrees(math.atan(magnitude))],
interpolation=interpolation,
fill=fill,
center=[0, 0],
)
elif op_name == "TranslateX":
img = F.affine(
img,
angle=0.0,
translate=[int(magnitude), 0],
scale=1.0,
interpolation=interpolation,
shear=[0.0, 0.0],
fill=fill,
)
elif op_name == "TranslateY":
img = F.affine(
img,
angle=0.0,
translate=[0, int(magnitude)],
scale=1.0,
interpolation=interpolation,
shear=[0.0, 0.0],
fill=fill,
)
elif op_name == "Rotate":
img = F.rotate(img, magnitude, interpolation=interpolation, fill=fill)
elif op_name == "Brightness":
img = F.adjust_brightness(img, 1.0 + magnitude)
elif op_name == "Color":
img = F.adjust_saturation(img, 1.0 + magnitude)
elif op_name == "Contrast":
img = F.adjust_contrast(img, 1.0 + magnitude)
elif op_name == "Sharpness":
img = F.adjust_sharpness(img, 1.0 + magnitude)
elif op_name == "Posterize":
# The tensor dtype must be torch.uint8
# and values are expected to be in [0, 255]
img = (img * 255.9999).to(dtype=torch.uint8)
img = F.posterize(img, int(magnitude))
img = (img / 255.9999).to(dtype=torch.float32)
elif op_name == "Solarize":
# The tensor dtype must be torch.uint8
# and values are expected to be in [0, 255]
img = (img * 255.9999).to(dtype=torch.uint8)
img = F.solarize(img, int(magnitude))
img = (img / 255.9999).to(dtype=torch.float32)
elif op_name == "AutoContrast":
img = F.autocontrast(img)
elif op_name == "Equalize":
# The tensor dtype must be torch.uint8
# and values are expected to be in [0, 255]
img = (img * 255.9999).to(dtype=torch.uint8)
img = F.equalize(img)
img = (img / 255.9999).to(dtype=torch.float32)
elif op_name == "Invert":
img = F.invert(img)
elif op_name == "Identity":
pass
else:
raise ValueError(f"The provided operator {op_name} is not recognized.")
return img
class RandAugment3d(autoaugment.RandAugment):
"""Modified RandAugment in order to handle single-view depth image.
In here, color / non-geometric operation will only be applied on RGB channel.
Args:
num_ops (int): Number of augmentation transformations to apply sequentially.
magnitude (int): Magnitude for all the transformations.
num_magnitude_bins (int): The number of different magnitude values.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
def __init__(
self,
num_ops: int = 2,
magnitude: int = 9,
num_magnitude_bins: int = 31,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
) -> None:
super().__init__(
num_ops=num_ops,
magnitude=magnitude,
num_magnitude_bins=num_magnitude_bins,
interpolation=interpolation,
fill=fill,
)
self.geom_ops = {
"Identity",
"ShearX",
"ShearY",
"TranslateX",
"TranslateY",
"Rotate",
}
def forward(self, img: Tensor) -> Tensor:
"""
img (PIL Image or Tensor): Image to be transformed.
Returns:
PIL Image or Tensor: Transformed image.
"""
fill = self.fill
channels, height, width = F.get_dimensions(img)
if isinstance(img, Tensor):
if isinstance(fill, (int, float)):
fill = [float(fill)] * channels
elif fill is not None:
fill = [float(f) for f in fill]
op_meta = self._augmentation_space(self.num_magnitude_bins, (height, width))
for _ in range(self.num_ops):
op_index = int(torch.randint(len(op_meta), (1,)).item())
op_name = list(op_meta.keys())[op_index]
magnitudes, signed = op_meta[op_name]
magnitude = (
float(magnitudes[self.magnitude].item()) if magnitudes.ndim > 0 else 0.0
)
if signed and torch.randint(2, (1,)):
magnitude *= -1.0
if op_name in self.geom_ops:
# apply geometric operation on RGBD image
img = _apply_op(
img, op_name, magnitude, interpolation=self.interpolation, fill=fill
)
else:
# Apply non_geom operation on the RGB channels only
img[:3, :, :] = _apply_op(
img[:3, :, :],
op_name,
magnitude,
interpolation=self.interpolation,
fill=fill,
)
return img
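# A minimal sketch of applying RandAugment3d to a single RGBD tensor. The
# 4-channel random input below is illustrative only; real inputs are float
# tensors in [0, 1] with depth as the fourth channel.
if __name__ == "__main__":
    torch.manual_seed(0)
    rgbd = torch.rand(4, 64, 64)  # (C=RGB+D, H, W), values in [0, 1]
    augment = RandAugment3d(num_ops=2, magnitude=9)
    out = augment(rgbd)
    # Geometric ops act on all four channels; color ops touch only the RGB part.
    print(out.shape)  # torch.Size([4, 64, 64])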
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/omnivore/data/rand_aug3d.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/common/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
import warnings
from functools import partial
from typing import Callable, List, Optional
import torch
from pytorch_lightning import LightningDataModule
class MultiDataLoader:
# NOTE: Please check MMF's MultiDataLoader if you want to support
# epoch based sampling funcs.
def __init__(
self,
loaders: List[torch.utils.data.DataLoader],
sampling_func: Optional[Callable] = None,
):
"""MultiDataLoader takes in a list of dataloaders and a sampling function
and cycles between these dataloaders after each batch based on the index
provided by the sampling function passed. Useful for doing multi-tasking
over multiple datasets
Args:
loaders (List[torch.utils.data.DataLoader]): List of dataloaders on
which the multitasking has to be done.
sampling_func (Optional[Callable], optional): Function which will return
the next index to be selected. Defaults to equally weight sampling.
"""
if loaders is None or len(loaders) == 0:
warnings.warn(
"Empty loaders passed into MultiDataLoader. This can have "
"unintended consequences."
)
if sampling_func is None:
sampling_func = partial(random.choice, range(len(loaders)))
self.sampling_func = sampling_func
self.loaders = loaders
self.num_datasets = len(self.loaders)
self.iterators = [None for _ in loaders]
self.current_index = 0
self.set_samplers()
def set_samplers(self):
self.samplers: List[torch.utils.data.Sampler] = []
for loader in self.loaders:
if hasattr(loader, "sampler"):
self.samplers.append(loader.sampler)
def __iter__(self):
self.iterators = []
for loader in self.loaders:
self.iterators.append(iter(loader))
self.change_dataloader()
return self
def __next__(self):
"""
        The next batch is computed with the following logic.
        The currently chosen iterator is set in the change_dataloader function,
        based on the `sampling_func` passed to `__init__`, which is called to
        get the index of the next selected dataloader.
        If we get the next batch from that iterator without a StopIteration
        exception, we return it as is.
        Epochs don't make sense when using a `sampling_func` unless you add
        extra logic to support epoch-based sampling functions. MMF does this in
        a different way, so take a look at IterationStrategies there to
        understand how this can be done.
        Consider random (equal) proportional sampling for datasets x and y,
        where x is half the size of y. By the time x completes 2 epochs, y will
        have completed only 1 epoch. **So please don't use max_epochs or
        epoch-based training in this case, as it won't be honored**. If an
        iterator is exhausted, we simply restart it, and no finished-iterators
        variable is used. This means execution will never reach the __iter__
        function again.
Returns:
Dict: Contains two keys, one "batch" containing the batch from current
selected dataloader and "datamodule_index" which is index of
currently selected dataloader.
"""
self.change_dataloader()
try:
next_batch = next(self.current_iterator)
except StopIteration:
iterator = iter(self.loaders[self.current_index])
self.iterators[self.current_index] = iterator
self.current_iterator = iterator
next_batch = next(self.current_iterator)
return {"batch": next_batch, "datamodule_index": self.current_index}
def change_dataloader(self):
choice = 0
if self.num_datasets <= 1:
self.current_index = choice
self.current_iterator = self.iterators[self.current_index]
return
choice = [self.sampling_func()]
if torch.distributed.is_available() and torch.distributed.is_initialized():
            # This broadcast is probably unnecessary with Lightning if everything
            # is already properly seeded. But, to be on the safe side, we can
            # still do this.
# There are also some smarter ways to do this to avoid any broadcasting
# by basically having a fixed generator with a fixed seed which will
# always work deterministically.
# TODO: Check if not doing this provides any speed benefits.
torch.distributed.broadcast_object_list(choice, 0)
self.current_index = choice[0]
self.current_iterator = self.iterators[self.current_index]
def set_epoch(self, epoch: int):
if torch.distributed.is_available() and torch.distributed.is_initialized():
for sampler in self.samplers:
if sampler is not None and hasattr(sampler, "set_epoch"):
sampler.set_epoch(epoch)
class MultiDataModule(LightningDataModule):
"""MultiDataModule is just an abstraction over MultiDataLoader
that will allow us to integrate it with Lightning.
"""
# NOTE: Add rest of the functions that should be called on child datamodules
# as required
def __init__(
self,
datamodules: List[LightningDataModule],
sampling_func: Optional[Callable] = None,
):
super().__init__()
self.datamodules = datamodules
self.sampling_func = sampling_func
self.current_datamodule_idx = 0
def setup(self, stage=None):
for datamodule in self.datamodules:
datamodule.setup(stage)
def prepare_data(self):
for datamodule in self.datamodules:
datamodule.prepare_data()
def train_dataloader(self) -> MultiDataLoader:
# TODO: Fix assign inconsistency
return self._build_multi_dataloader("train")
def val_dataloader(self) -> MultiDataLoader:
return self._build_multi_dataloader("val")
def test_dataloader(self) -> MultiDataLoader:
return self._build_multi_dataloader("test")
def _build_multi_dataloader(self, split="train"):
dataloaders = []
for datamodule in self.datamodules:
dataloaders.append(getattr(datamodule, f"{split}_dataloader")())
return MultiDataLoader(dataloaders, self.sampling_func)
def on_before_batch_transfer(self, batch, *args):
batch, index = batch["batch"], batch["datamodule_index"]
self.current_datamodule_idx = index
return self.datamodules[self.current_datamodule_idx].on_before_batch_transfer(
batch, *args
)
def on_after_batch_transfer(self, batch, *args):
return self.datamodules[self.current_datamodule_idx].on_after_batch_transfer(
batch, *args
)
def teardown(self, stage):
for datamodule in self.datamodules:
datamodule.teardown(stage)
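# A minimal sketch of cycling between two toy DataLoaders with MultiDataLoader.
# The datasets and the round-robin sampling function are illustrative only;
# real usage wires the loaders up through MultiDataModule.
if __name__ == "__main__":
    import itertools
    loader_a = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(torch.zeros(4)), batch_size=2
    )
    loader_b = torch.utils.data.DataLoader(
        torch.utils.data.TensorDataset(torch.ones(6)), batch_size=2
    )
    round_robin = itertools.cycle([0, 1])
    multi_loader = MultiDataLoader(
        [loader_a, loader_b], sampling_func=lambda: next(round_robin)
    )
    batches = iter(multi_loader)
    for _ in range(4):
        item = next(batches)
        # Each item carries the batch plus the index of the loader it came from.
        print(item["datamodule_index"], item["batch"])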
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/common/data/multidata.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .multidata import * # noqa F401
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/common/data/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/albef/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
import os
import random
import time
import ruamel.yaml as yaml
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from data.vqa_datamodules import VQADataModule
from model import albef_model_for_vqa
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from utils import (
add_weight_decay,
get_rank,
get_world_size,
init_distributed_mode,
is_dist_avail_and_initialized,
is_main_process,
save_result,
)
def train(model, datamodule, args, device):
model_without_ddp = model.module if is_dist_avail_and_initialized() else model
model.train()
optimizer_params = add_weight_decay(model, args["weight_decay"])
optimizer = AdamW(optimizer_params, lr=args["lr"])
scheduler = CosineAnnealingWarmRestarts(
optimizer, T_0=args["max_epochs"], eta_min=args["min_lr"]
)
step_size = args["step_size"]
warmup_steps = args["warmup_steps"]
warmup_iterations = warmup_steps * step_size
data_loader = datamodule.train_dataloader(
is_distributed=is_dist_avail_and_initialized(),
num_tasks=get_world_size(),
global_rank=get_rank(),
)
start_time = time.time()
for epoch in range(args["max_epochs"]):
if is_dist_avail_and_initialized():
data_loader.sampler.set_epoch(epoch)
if epoch > 0:
scheduler.step(epoch + warmup_steps)
for batch, (
images,
questions,
questions_atts,
answers,
answers_atts,
ans_weights,
ans_lengths,
) in enumerate(data_loader):
if epoch > 0:
alpha = args["alpha"]
else:
alpha = args["alpha"] * min(1, batch / len(data_loader))
images = images.to(device, non_blocking=True)
questions = questions.to(device)
questions_atts = questions_atts.to(device)
answers = answers.to(device)
answers_atts = answers_atts.to(device)
ans_weights = ans_weights.to(device)
loss = model(
images,
questions,
questions_atts,
answers,
answers_atts,
ans_weights=ans_weights,
ans_lengths=ans_lengths,
alpha=alpha,
is_train=True,
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch == 0 and batch % step_size == 0 and batch <= warmup_iterations:
scheduler.step(batch // step_size)
if batch % args["log_every_n_steps"] == 0:
total_time = time.time() - start_time
time_str = "time {},".format(
datetime.timedelta(seconds=int(total_time))
)
epoch_str = "epoch {}/{},".format(epoch, args["max_epochs"])
batch_str = "batch {}/{},".format(batch, len(data_loader))
loss_str = "loss {}".format(loss.item())
print(time_str, epoch_str, batch_str, loss_str)
if is_main_process():
save_obj = {
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"epoch": epoch,
}
torch.save(
save_obj,
os.path.join(args["checkpoint_root"], "vqa_checkpoint_%02d.pt" % epoch),
)
if is_dist_avail_and_initialized():
dist.barrier()
@torch.no_grad()
def evaluation(model, datamodule, args, device):
model.eval()
result = []
answer_list = datamodule.test_dataset.answer_list
answer_input_ids = datamodule.test_dataset.answer_input_ids.to(device)
answer_atts = datamodule.test_dataset.answer_attention_mask.to(device)
data_loader = datamodule.test_dataloader(
is_distributed=is_dist_avail_and_initialized(),
num_tasks=get_world_size(),
global_rank=get_rank(),
)
start_time = time.time()
for batch, (img, ques, ques_atts, ques_ids) in enumerate(data_loader):
img = img.to(device, non_blocking=True)
ques = ques.to(device)
ques_atts = ques_atts.to(device)
topk_ids, topk_probs = model(
img,
ques,
ques_atts,
answer_input_ids,
answer_atts,
k=args["k_test"],
is_train=False,
)
for ques_id, topk_id, topk_prob in zip(ques_ids, topk_ids, topk_probs):
_, pred = topk_prob.max(dim=0)
result.append(
{"question_id": ques_id, "answer": answer_list[topk_id[pred]]}
)
if batch % args["log_every_n_steps"] == 0:
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(
"time {}, batch {}/{}".format(total_time_str, batch, len(data_loader))
)
return result
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--config", default="./examples/albef/configs/vqa.yaml")
args = parser.parse_args()
config = yaml.load(open(args.config, "r"), Loader=yaml.Loader)
init_distributed_mode(config)
device = torch.device(config["device"])
seed = config["seed"] + get_rank()
torch.manual_seed(seed)
random.seed(seed)
cudnn.benchmark = True
datamodule = VQADataModule(**config["datamodule_args"])
model = albef_model_for_vqa(config, pretrained=True)
model = model.to(device)
if is_dist_avail_and_initialized():
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[config["gpu"]]
)
train(model, datamodule, config["training_args"], device)
result = evaluation(model, datamodule, config["eval_args"], device)
save_result(result, config["output_root"], "vqa_output")
if __name__ == "__main__":
main()
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/albef/finetune_vqa.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from torchmultimodal.models.albef.image_encoder import ALBEFVisionEncoder
from torchmultimodal.models.albef.model import ALBEFModel, ALBEFModelWithSimilarity
from torchmultimodal.models.albef.multimodal_encoder import ALBEFMultimodalEncoder
from torchmultimodal.modules.encoders.bert_text_encoder import bert_text_encoder
from torchmultimodal.modules.layers.text_embedding import BERTTextEmbeddings
from torchmultimodal.modules.losses.albef import (
CausalLanguageModelingLoss,
ImageTextContrastiveLoss,
)
from torchmultimodal.utils.attention import get_causal_attention_mask
from torchmultimodal.utils.common import momentum_update, remove_grad
_ALBEF_PRETRAINED_URLS = {
"vqa": "https://download.pytorch.org/models/multimodal/albef/pretrained_vqa_checkpoint.pt",
"retrieval": "https://download.pytorch.org/models/multimodal/albef/pretrained_retrieval_checkpoint.pt",
}
class PredictionHead(nn.Module):
"""
Predict the following token autoregressively.
Args:
vocab_size (int): The number of different tokens the prediction_head can predict.
hidden_size (int): The hidden size of the prediction_head.
layer_norm_eps (float): The epsilon used by the prediction_head normalization layer.
transform_act_fn (Callable[[Tensor], Tensor]): The activation function in the prediction_head.
Inputs:
hidden_states (Tensor): The hidden states of preceding tokens.
Returns:
Tensor: Prediction scores for the following token.
"""
def __init__(
self,
vocab_size: int = 30522,
hidden_size: int = 768,
layer_norm_eps: float = 1e-12,
transform_act_fn: Callable[[Tensor], Tensor] = nn.functional.gelu,
) -> None:
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.transform_act_fn = transform_act_fn
self.layer_norm = nn.LayerNorm(hidden_size, eps=layer_norm_eps)
self.decoder = nn.Linear(hidden_size, vocab_size)
def forward(self, hidden_states: Tensor) -> Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class ALBEFDecoder(nn.Module):
"""
Generate the prediction scores for answers from image and question hidden states.
Args:
text_embeddings (ALBEFTextEmbeddings): Instantiated ALBEFTextEmbeddings.
multimodal_encoder (ALBEFMultimodalEncoder): Instantiated ALBEFMultimodalEncoder.
prediction_head (PredictionHead): Instantiated PredictionHead.
Inputs:
input_ids (Tensor of shape (batch_size, seq_len)):
Input ids for input text tokens.
attention_mask (Tensor of shape (batch_size, seq_len)):
Input attention mask to avoid performing attention on padding token indices.
encoder_hidden_states (Tensor of shape (batch_size, encoder_seq_len, hidden_size)):
The encoder hidden states.
encoder_attention_mask (Tensor of shape (batch_size, encoder_seq_len)):
The attention mask for encoder hidden states.
Returns:
Tensor: Prediction scores for answers.
"""
def __init__(
self,
text_embeddings: BERTTextEmbeddings,
multimodal_encoder: ALBEFMultimodalEncoder,
prediction_head: PredictionHead,
) -> None:
super().__init__()
self.text_embeddings = text_embeddings
self.multimodal_encoder = multimodal_encoder
self.prediction_head = prediction_head
def get_extended_attention_mask_for_decoder(self, attention_mask: Tensor) -> Tensor:
"""
Apply a causal mask in addition to the padding mask and make the mask broadcastable,
such that future and masked tokens are ignored.
Args:
attention_mask (Tensor):
Padding mask with ones indicating tokens to attend to, zeros for tokens to ignore.
Returns:
extended_attention_mask (Tensor):
The broadcastable attention mask, with the same dtype as ``attention_mask.dtype``.
"""
device = attention_mask.device
batch_size, seq_length = attention_mask.shape
causal_mask = get_causal_attention_mask(seq_length).to(device)
causal_mask = causal_mask.repeat(batch_size, 1).view(
batch_size, seq_length, seq_length
)
extended_attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
extended_attention_mask = extended_attention_mask.to(dtype=attention_mask.dtype)
return extended_attention_mask
def forward(
self,
input_ids: Tensor,
attention_mask: Tensor,
encoder_hidden_states: Tensor,
encoder_attention_mask: Tensor,
) -> Tensor:
hidden_states = self.text_embeddings(input_ids)
attention_mask = self.get_extended_attention_mask_for_decoder(attention_mask)
decoder_output = self.multimodal_encoder(
hidden_states=hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
prediction_scores = self.prediction_head(decoder_output)
return prediction_scores
class ALBEFModelForVQA(nn.Module):
"""
ALBEF Model for VQA finetuning and inference.
Args:
model (ALBEFModel): Instantiated ALBEFModel.
answer_decoder (ALBEFDecoder): Instantiated ALBEFDecoder.
loss (CausalLanguageModelingLoss): Instantiated CausalLanguageModelingLoss.
Inputs:
image (Tensor of shape (B, C, H, W)): Image features.
question (Tensor of shape (B, L)): Question text features.
question_atts (Tensor of shape (B, L)): Question attention mask.
answers (Tensor of shape (N, M)): Answer text features.
answers_atts (Tensor of shape (N, M)): Answer attention mask.
ans_weights (Optional[Tensor] of shape (N)): Weights for each answer.
Required if is_train is True.
ans_lengths (Optional[List[int]] of length B): Number of answers for each question.
ans_lengths should sum to N.
Required if is_train is True.
alpha (Optional[float]): The interpolation value between clm_loss and loss_distill.
Required if is_train is True.
k (Optional[int]): The number of answers to return for inference.
Required if is_train is False.
is_train (Optional[bool]): Whether the model is in training.
Returns:
is_train is True:
Tensor: The masked language modeling loss for input.
is_train is False:
Tuple[Tensor, Tensor]: The ids and probabilities for the top k predicted answers.
"""
def __init__(
self,
model: ALBEFModel,
answer_decoder: ALBEFDecoder,
loss: CausalLanguageModelingLoss,
) -> None:
super().__init__()
self.model = model
self.answer_decoder = answer_decoder
self.loss = loss
self.answer_decoder_m = copy.deepcopy(self.answer_decoder)
remove_grad(
self.answer_decoder_m
) # remove gradient for the momentum decoder model
def _train_forward(
self,
image: Tensor,
question: Tensor,
question_atts: Tensor,
answers: Tensor,
answers_atts: Tensor,
ans_weights: Tensor,
ans_lengths: List[int],
alpha: float,
) -> Tensor:
"""
Forward step for training. Encode the inputs with the ALBEFModel.
Generate pseudo-targets using answer_decoder_m (momentum decoder model).
Generate answer predictions using answer_decoder.
Compute masked language modeling loss of the predictions using answers as labels,
pseudo-targets as soft-labels, and alpha as their interpolation value.
Inputs:
image (Tensor of shape (B, C, H, W)): Image features.
question (Tensor of shape (B, L)): Question text features.
question_atts (Tensor of shape (B, L)): Question attention mask.
answers (Tensor of shape (N, M)): Answer text features.
answers_atts (Tensor of shape (N, M)): Answer attention mask.
ans_weights (Tensor of shape (N)): Weights for each answer.
ans_lengths (List[int] of length B): Number of answers for each question.
ans_lengths should sum to N.
alpha (float): The interpolation value between clm_loss and loss_distill.
Returns:
Tensor: The masked language modeling loss for input.
"""
# get image-question embeddings from the ALBEFModel and format it to match the ans_lengths
encoder_outputs = self.model(image, question, question_atts)
(
encoder_hidden_states,
encoder_hidden_states_m,
encoder_attention_mask,
) = self._encoder_hidden_states(
encoder_outputs.multimodal_embeddings,
encoder_outputs.multimodal_embeddings_m,
question_atts,
ans_lengths,
)
# use the momentum model to generate pseudo-targets
with torch.no_grad():
momentum_update(
self.answer_decoder, self.answer_decoder_m, self.model.momentum
)
prediction_scores_m = self.answer_decoder_m(
input_ids=answers,
attention_mask=answers_atts,
encoder_hidden_states=encoder_hidden_states_m,
encoder_attention_mask=encoder_attention_mask,
)
# generate answer predictions
prediction_scores = self.answer_decoder(
input_ids=answers,
attention_mask=answers_atts,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
# compute masked language modeling loss from the prediction scores
labels = answers.masked_fill(answers == 0, self.loss.mask_token_id)
loss = self.loss(labels, prediction_scores, prediction_scores_m, alpha)
loss = ans_weights * loss
loss = loss.sum() / image.size(0)
return loss
def _eval_forward(
self,
image: Tensor,
question: Tensor,
question_atts: Tensor,
answers: Tensor,
answer_atts: Tensor,
k: int = 128,
) -> Tuple[Tensor, Tensor]:
"""
Forward step for evaluation. Encode the inputs with the ALBEFModel.
Generate answer autoregressively using the decoder, starting with the [CLS] token.
        Compute the answer ids and their respective probabilities for the top k predictions.
Inputs:
image (Tensor of shape (B, C, H, W)): Image features.
question (Tensor of shape (B, L)): Question text features.
question_atts (Tensor of shape (B, L)): Question attention mask.
answers (Tensor of shape (N, M)): Answer text features.
answer_atts (Tensor of shape (N, M)): Answer attention mask.
k (int): The number of answers to return for inference.
Returns:
Tuple[Tensor, Tensor]: The ids and probabilities for the top k predicted answers.
"""
# get multimodal embeddings from the ALBEFModel and
# feed it to the decoder as cross attention
encoder_outputs = self.model(image, question, question_atts)
# use cls token as the decoder's initial input token
num_ques = question.size(0)
start_ids = answers[0, 0].repeat(num_ques, 1)
atts = torch.ones(start_ids.shape).to(image.device)
# auto-regressively generates the answer
prediction_scores = self.answer_decoder(
input_ids=start_ids,
attention_mask=atts,
encoder_hidden_states=encoder_outputs.multimodal_embeddings,
encoder_attention_mask=question_atts,
)
logits = prediction_scores[:, 0, :]
answer_first_token = answers[:, 1]
prob_first_token = F.softmax(logits, dim=1).index_select(
dim=1, index=answer_first_token
)
topk_probs, topk_ids = prob_first_token.topk(k, dim=1)
input_ids = []
input_atts = []
for topk_id in topk_ids:
input_ids.append(answers.index_select(dim=0, index=topk_id))
input_atts.append(answer_atts.index_select(dim=0, index=topk_id))
input_ids = torch.cat(input_ids)
input_atts = torch.cat(input_atts)
targets_ids = input_ids.masked_fill(input_ids == 0, self.loss.mask_token_id)
question_states = encoder_outputs.multimodal_embeddings.repeat_interleave(
k, dim=0
)
question_atts = question_atts.repeat_interleave(k, dim=0)
prediction_scores = self.answer_decoder(
input_ids=input_ids,
attention_mask=input_atts,
encoder_hidden_states=question_states,
encoder_attention_mask=question_atts,
)
answer_loss = self.loss(targets_ids, prediction_scores)
answer_loss = answer_loss.view(input_ids.size(0), -1)
# topk_prob: first token probability
topk_probs = topk_probs.view(-1, 1)
log_probs = torch.cat([topk_probs.log(), -answer_loss], dim=1)
# re-calculate log probabilities for the answer sequences using chain rule
log_probs_sum = log_probs.sum(1)
log_probs_sum = log_probs_sum.view(num_ques, k)
topk_probs = F.softmax(log_probs_sum, dim=-1)
# get top-k after re-ranking
topk_probs, rerank_id = topk_probs.topk(k, dim=1)
topk_ids = torch.gather(topk_ids, 1, rerank_id)
return topk_ids, topk_probs
def _encoder_hidden_states(
self,
multimodal_embeds: Tensor,
multimodal_embeds_m: Tensor,
question_atts: Tensor,
ans_lengths: List[int],
) -> Tuple[Tensor, Tensor, Tensor]:
"""
        Repeat each image-question input's embedding and mask to match the number of answers it has.
Args:
multimodal_embeds (Tensor): Image-question embeddings.
multimodal_embeds_m (Tensor): Image-question embeddings from the momentum model.
question_atts (Tensor): Question attention mask.
ans_lengths (List[int]): The number of answers each image-question input has.
Returns:
encoder_hidden_states (Tensor): Image-question embeddings after the repetition.
encoder_hidden_states_m (Tensor): Image-question embeddings from the momentum model after the repetition.
encoder_attention_mask (Tensor): Question attention mask after the repetition.
"""
encoder_hidden_states = []
encoder_attention_mask = []
for b, n in enumerate(ans_lengths):
encoder_hidden_states += [multimodal_embeds[b]] * n
encoder_attention_mask += [question_atts[b]] * n
encoder_hidden_states = torch.stack(encoder_hidden_states)
encoder_attention_mask = torch.stack(encoder_attention_mask)
with torch.no_grad():
encoder_hidden_states_m = []
for b, n in enumerate(ans_lengths):
encoder_hidden_states_m += [multimodal_embeds_m[b]] * n
encoder_hidden_states_m = torch.stack(encoder_hidden_states_m)
return encoder_hidden_states, encoder_hidden_states_m, encoder_attention_mask
def forward(
self,
image: Tensor,
question: Tensor,
question_atts: Tensor,
answers: Tensor,
answers_atts: Tensor,
ans_weights: Optional[Tensor] = None,
ans_lengths: Optional[List[int]] = None,
alpha: Optional[float] = 0.0,
k: Optional[int] = 128,
is_train: Optional[bool] = True,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
if is_train:
return self._train_forward(
image,
question,
question_atts,
answers,
answers_atts,
ans_weights,
ans_lengths,
alpha,
)
else:
return self._eval_forward(
image,
question,
question_atts,
answers,
answers_atts,
k,
)
class ALBEFModelForRetrieval(nn.Module):
"""
ALBEF Model for Retrieval finetuning and inference.
In training mode, the forward step computes image-text contrastive loss and
image-text matching loss.
In evaluation mode, the forward step takes 3 types of input:
image: encode image input, project and normalize the embeddings.
text: encode text input, project and normalize the embeddings.
multimodal: create multimodal embeddings from image and text
embeddings, and compute image-text matching scores.
Args:
model_with_similarity (ALBEFModelWithSimilarity): Instantiated ALBEFModelWithSimilarity.
itc_loss (ImageTextContrastiveLoss): Instantiated ImageTextContrastiveLoss.
hidden_size (int): Dimensionality of encoder outputs.
Inputs:
image (Optional[Tensor] of shape (B, C, H, W)): Image features.
Required if is_train is True.
Required if input_type is "image" or "multimodal".
text (Optional[Tensor] of shape (B, L)): Text features.
Required if is_train is True.
Required if input_type is "text" or "multimodal".
text_atts (Tensor of shape (B, L)): Text attention mask.
Required if is_train is True.
Required if input_type is "text" or "multimodal".
idx (Tensor of shape (B)): Identifier for each image sample.
Required if is_train is True.
        alpha (Optional[float]): The interpolation value for the momentum distillation loss.
Default is 0.
input_type (Optional[str]): "image", "text", or "multimodal" indicating the encoding type.
Required if is_train is False.
is_train (Optional[bool]): Whether the model is in training.
Default is True.
Returns:
is_train is True:
Tensor: The sum of itc loss and itm loss.
is_train is False:
input_type is "image":
Tuple[Tensor, Tensor]: Image embeddings and projected image features.
input_type is "text":
Tuple[Tensor, Tensor]: Text embeddings and projected text features.
input_type is "multimodal"
Tensor: Scores for the retrieval task.
"""
def __init__(
self,
model_with_similarity: ALBEFModelWithSimilarity,
itc_loss: ImageTextContrastiveLoss,
hidden_size: int,
) -> None:
super().__init__()
self.model_with_similarity = model_with_similarity
self.itc_loss = itc_loss
self.itm_head = nn.Linear(hidden_size, 2)
def _train_forward(
self,
image: Tensor,
text: Tensor,
text_atts: Tensor,
idx: Tensor,
alpha: float,
) -> Tensor:
encoder_output = self.model_with_similarity(image, text, text_atts, idx)
# compute image-text contrastive loss
similarity_outputs = encoder_output.similarity
similarity_targets = encoder_output.sim_targets
itc_loss = self.itc_loss(
similarity_outputs.sim_i2t,
similarity_outputs.sim_t2i,
similarity_outputs.sim_i2t_m,
similarity_outputs.sim_t2i_m,
similarity_targets,
alpha,
)
# compute image-text matching loss
pos_embeddings = encoder_output.multimodal_embeddings[:, 0, :]
neg_embeddings = encoder_output.multimodal_embeddings_neg[:, 0, :]
vl_embeddings = torch.cat([pos_embeddings, neg_embeddings], dim=0)
vl_output = self.itm_head(vl_embeddings)
itm_labels = torch.cat(
[
torch.ones(pos_embeddings.size(0), dtype=torch.long),
torch.zeros(neg_embeddings.size(0), dtype=torch.long),
],
dim=0,
).to(vl_embeddings.device)
itm_loss = F.cross_entropy(vl_output, itm_labels)
loss = itc_loss + itm_loss
return loss
def _encode_image(
self,
image: Tensor,
) -> Tuple[Tensor, Tensor]:
image_embed = self.model_with_similarity.albef_model.vision_encoder(image)
image_feat = F.normalize(
self.model_with_similarity.vision_proj(image_embed[:, 0, :]), dim=-1
)
return image_embed, image_feat
def _encode_text(
self,
text: Tensor,
text_atts: Tensor,
) -> Tuple[Tensor, Tensor]:
text_embed = self.model_with_similarity.albef_model.text_encoder(
text, text_atts
).last_hidden_state
text_feat = F.normalize(
self.model_with_similarity.text_proj(text_embed[:, 0, :]), dim=-1
)
return text_embed, text_feat
def _image_text_matching_score(
self,
image: Tensor,
text: Tensor,
text_atts: Tensor,
) -> Tensor:
multimodal_embeds = self.model_with_similarity.albef_model.multimodal_encoder(
text,
text_atts,
image,
)
score = self.itm_head(multimodal_embeds[:, 0, :])[:, 1]
return score
def _eval_forward(
self,
input_type: str,
image: Optional[Tensor],
text: Optional[Tensor],
text_atts: Optional[Tensor],
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
if input_type == "image":
assert image is not None, "image input tensor cannot be None"
return self._encode_image(image)
elif input_type == "text":
assert (
text is not None and text_atts is not None
), "text and text attention mask cannot be None"
return self._encode_text(text, text_atts)
elif input_type == "multimodal":
assert (
image is not None and text is not None and text_atts is not None
), "image embeddings, text embeddings, and text attention mask cannot be None"
return self._image_text_matching_score(image, text, text_atts)
else:
raise ValueError("input_type must be image, text, or multimodal")
def forward(
self,
image: Optional[Tensor] = None,
text: Optional[Tensor] = None,
text_atts: Optional[Tensor] = None,
idx: Optional[Tensor] = None,
        alpha: Optional[float] = 0.0,
input_type: Optional[str] = None,
is_train: Optional[bool] = True,
) -> Union[Tensor, Tuple[Tensor, Tensor]]:
if is_train:
return self._train_forward(
image,
text,
text_atts,
idx,
alpha,
)
else:
return self._eval_forward(
input_type,
image,
text,
text_atts,
)
def albef_model_for_vqa(
config: Dict[str, Any], pretrained: bool = False
) -> ALBEFModelForVQA:
vision_encoder = ALBEFVisionEncoder(**config["vision_encoder_args"])
text_encoder = bert_text_encoder(**config["text_encoder_args"])
question_multimodal_encoder = ALBEFMultimodalEncoder(
**config["multimodal_encoder_args"]
)
text_embeddings = BERTTextEmbeddings(**config["text_embeddings_args"])
answer_multimodal_encoder = ALBEFMultimodalEncoder(
**config["multimodal_encoder_args"]
)
prediction_head = PredictionHead(**config["prediction_head_args"])
albef_model = ALBEFModel(vision_encoder, text_encoder, question_multimodal_encoder)
decoder = ALBEFDecoder(text_embeddings, answer_multimodal_encoder, prediction_head)
loss = CausalLanguageModelingLoss()
model = ALBEFModelForVQA(albef_model, decoder, loss)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
_ALBEF_PRETRAINED_URLS["vqa"], map_location="cpu"
)
model.load_state_dict(checkpoint)
return model
def albef_model_for_retrieval(
config: Dict[str, Any], pretrained: bool = False
) -> ALBEFModelForRetrieval:
vision_encoder = ALBEFVisionEncoder(**config["vision_encoder_args"])
text_encoder = bert_text_encoder(**config["text_encoder_args"])
multimodal_encoder = ALBEFMultimodalEncoder(**config["multimodal_encoder_args"])
vision_proj = nn.Linear(**config["projection_args"])
text_proj = nn.Linear(**config["projection_args"])
albef_model = ALBEFModel(vision_encoder, text_encoder, multimodal_encoder)
albef_model_with_sim = ALBEFModelWithSimilarity(
albef_model, vision_proj, text_proj, **config["similarity_args"]
)
itc_loss = ImageTextContrastiveLoss()
model = ALBEFModelForRetrieval(
albef_model_with_sim, itc_loss, config["hidden_size"]
)
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
_ALBEF_PRETRAINED_URLS["retrieval"], map_location="cpu"
)
model.load_state_dict(checkpoint)
return model
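# A minimal shape-check sketch for PredictionHead with toy dimensions; the
# sizes below are arbitrary and are not the ALBEF defaults.
if __name__ == "__main__":
    head = PredictionHead(vocab_size=11, hidden_size=8)
    hidden_states = torch.randn(2, 5, 8)  # (batch_size, seq_len, hidden_size)
    scores = head(hidden_states)
    print(scores.shape)  # torch.Size([2, 5, 11]), one logit per vocab token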
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/albef/model.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import torch
import torch.distributed as dist
from torch import nn
def setup_for_distributed(is_master):
"""
    This function disables printing when not in the master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args["rank"] = int(os.environ["RANK"])
args["world_size"] = int(os.environ["WORLD_SIZE"])
args["gpu"] = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args["rank"] = int(os.environ["SLURM_PROCID"])
args["gpu"] = args["rank"] % torch.cuda.device_count()
else:
print("Not using distributed mode")
args["distributed"] = False
return
args["distributed"] = True
torch.cuda.set_device(args["gpu"])
args["dist_backend"] = "nccl"
print(
"| distributed init (rank {}): {}".format(args["rank"], args["dist_url"]),
flush=True,
)
torch.distributed.init_process_group(
backend=args["dist_backend"],
init_method=args["dist_url"],
world_size=args["world_size"],
rank=args["rank"],
)
torch.distributed.barrier()
setup_for_distributed(args["rank"] == 0)
def save_result(result, directory, file_name):
rank_path = os.path.join(directory, "{}_rank_{}.json".format(file_name, get_rank()))
main_path = os.path.join(directory, "{}.json".format(file_name))
json.dump(result, open(rank_path, "w"))
if is_dist_avail_and_initialized():
dist.barrier()
if is_main_process():
result = []
for rank in range(get_world_size()):
rank_path = os.path.join(
directory, "{}_rank_{}.json".format(file_name, rank)
)
rank_res = json.load(open(rank_path, "r"))
result += rank_res
json.dump(result, open(main_path, "w"))
if is_dist_avail_and_initialized():
dist.barrier()
def add_weight_decay(model: nn.Module, weight_decay: float) -> list:
decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # skip weight_decay for momentum models
if len(param.shape) == 1 or name.endswith(".bias"):
no_decay.append(param)
else:
decay.append(param)
return [
{"params": no_decay, "weight_decay": 0.0},
{"params": decay, "weight_decay": weight_decay},
]
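# A minimal sketch of how the parameter groups returned by add_weight_decay are
# consumed; the tiny model and learning rate are illustrative only.
if __name__ == "__main__":
    from torch.optim import AdamW
    demo_model = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
    param_groups = add_weight_decay(demo_model, weight_decay=0.01)
    # Biases and 1-d params (e.g. LayerNorm weights) get zero weight decay.
    print([len(group["params"]) for group in param_groups])  # [3, 1]
    optimizer = AdamW(param_groups, lr=1e-3)
    print(type(optimizer).__name__)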
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/albef/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
import os
import random
import time
import ruamel.yaml as yaml
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from data.retrieval_datamodule import RetrievalDataModule
from model import albef_model_for_retrieval
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts
from utils import (
add_weight_decay,
get_rank,
get_world_size,
init_distributed_mode,
is_dist_avail_and_initialized,
is_main_process,
)
def train(model, datamodule, args, device):
model.train()
model_without_ddp = model.module if is_dist_avail_and_initialized() else model
optimizer_params = add_weight_decay(model, args["weight_decay"])
optimizer = AdamW(optimizer_params, lr=args["lr"])
scheduler = CosineAnnealingWarmRestarts(
optimizer, T_0=args["max_epochs"], eta_min=args["min_lr"]
)
step_size = args["step_size"]
warmup_steps = args["warmup_steps"]
warmup_iterations = warmup_steps * step_size
data_loader = datamodule.train_dataloader(
is_distributed=is_dist_avail_and_initialized(),
num_tasks=get_world_size(),
global_rank=get_rank(),
)
start_time = time.time()
for epoch in range(args["max_epochs"]):
if epoch > 0:
scheduler.step(epoch + warmup_steps)
for batch, (image, text, text_atts, idx) in enumerate(data_loader):
if epoch > 0:
alpha = args["alpha"]
else:
alpha = args["alpha"] * min(1, batch / len(data_loader))
image = image.to(device, non_blocking=True)
text = text.to(device)
text_atts = text_atts.to(device)
idx = idx.to(device, non_blocking=True)
loss = model(image, text, text_atts, idx, alpha, is_train=True)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if epoch == 0 and batch % step_size == 0 and batch <= warmup_iterations:
scheduler.step(batch // step_size)
if batch % args["log_every_n_steps"] == 0:
total_time = time.time() - start_time
time_str = "time {},".format(
datetime.timedelta(seconds=int(total_time))
)
epoch_str = "epoch {}/{},".format(epoch, args["max_epochs"])
batch_str = "batch {}/{},".format(batch, len(data_loader))
loss_str = "loss {}".format(loss.item())
print(time_str, epoch_str, batch_str, loss_str)
if is_main_process():
save_obj = {
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": scheduler.state_dict(),
"epoch": epoch,
}
torch.save(
save_obj,
os.path.join(
args["checkpoint_root"], "retrieval_checkpoint_%02d.pt" % epoch
),
)
if is_dist_avail_and_initialized():
dist.barrier()
torch.cuda.empty_cache()
@torch.no_grad()
def encode_text(model, text_dataloader, device):
text_embeds = []
text_feats = []
text_atts = []
for text, text_att in text_dataloader:
text = text.to(device)
text_att = text_att.to(device)
text_embed, text_feat = model(
text=text, text_atts=text_att, input_type="text", is_train=False
)
text_embeds.append(text_embed)
text_feats.append(text_feat)
text_atts.append(text_att)
text_embeds = torch.cat(text_embeds, dim=0)
text_feats = torch.cat(text_feats, dim=0)
text_atts = torch.cat(text_atts, dim=0)
return text_embeds, text_feats, text_atts
@torch.no_grad()
def encode_image(model, image_dataloader, device):
image_embeds = []
image_feats = []
for image in image_dataloader:
image = image.to(device)
image_embed, image_feat = model(image=image, input_type="image", is_train=False)
image_embeds.append(image_embed)
image_feats.append(image_feat)
image_embeds = torch.cat(image_embeds, dim=0)
image_feats = torch.cat(image_feats, dim=0)
return image_embeds, image_feats
@torch.no_grad()
def image_to_text(
model,
image_embeds,
text_embeds,
text_atts,
sims_matrix,
num_images,
num_text,
device,
args,
):
start_time = time.time()
world_size = get_world_size()
rank = get_rank()
step = sims_matrix.size(0) // world_size + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
k = args["k_test"]
image_to_text_scores = torch.full((num_images, num_text), -100.0).to(device)
for i, sims in enumerate(sims_matrix[start:end]):
_, topk_idx = sims.topk(k, dim=0)
score = model(
image=image_embeds[start + i].repeat(k, 1, 1),
text=text_embeds[topk_idx],
text_atts=text_atts[topk_idx],
input_type="multimodal",
is_train=False,
)
image_to_text_scores[start + i, topk_idx] = score
if i % args["log_every_n_steps"] == 0:
total_time = time.time() - start_time
time_str = "time {},".format(datetime.timedelta(seconds=int(total_time)))
batch_str = "batch {}/{},".format(i, len(sims_matrix[start:end]))
print("image to text retrieval", time_str, batch_str)
return image_to_text_scores
@torch.no_grad()
def text_to_image(
model,
image_embeds,
text_embeds,
text_atts,
sims_matrix,
num_images,
num_text,
device,
args,
):
start_time = time.time()
world_size = get_world_size()
rank = get_rank()
step = sims_matrix.size(0) // world_size + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
k = args["k_test"]
text_to_image_scores = torch.full((num_text, num_images), -100.0).to(device)
for i, sims in enumerate(sims_matrix[start:end]):
_, topk_idx = sims.topk(k, dim=0)
score = model(
image=image_embeds[topk_idx],
text=text_embeds[start + i].repeat(k, 1, 1),
text_atts=text_atts[start + i].repeat(k, 1, 1),
input_type="multimodal",
is_train=False,
)
text_to_image_scores[start + i, topk_idx] = score
if i % args["log_every_n_steps"] == 0:
total_time = time.time() - start_time
time_str = "time {},".format(datetime.timedelta(seconds=int(total_time)))
batch_str = "batch {}/{},".format(i, len(sims_matrix[start:end]))
print("text to image retrieval", time_str, batch_str)
return text_to_image_scores
@torch.no_grad()
def evaluation(model, datamodule, args, device):
model.eval()
text_loader = datamodule.text_dataloader()
image_loader = datamodule.image_dataloader()
num_images = len(datamodule.image_dataset)
num_text = len(datamodule.text_dataset)
text_embeds, text_feats, text_atts = encode_text(model, text_loader, device)
image_embeds, image_feats = encode_image(model, image_loader, device)
sims_matrix = image_feats @ text_feats.t()
image_to_text_scores = image_to_text(
model,
image_embeds,
text_embeds,
text_atts,
sims_matrix,
num_images,
num_text,
device,
args,
)
sims_matrix = sims_matrix.t()
text_to_image_scores = text_to_image(
model,
image_embeds,
text_embeds,
text_atts,
sims_matrix,
num_images,
num_text,
device,
args,
)
if is_dist_avail_and_initialized():
dist.barrier()
torch.distributed.all_reduce(
image_to_text_scores, op=torch.distributed.ReduceOp.SUM
)
torch.distributed.all_reduce(
text_to_image_scores, op=torch.distributed.ReduceOp.SUM
)
return image_to_text_scores.cpu(), text_to_image_scores.cpu()
@torch.no_grad()
def itm_eval(
image_to_text_scores,
text_to_image_scores,
image_to_text_mapping,
text_to_image_mapping,
):
# Images to Text
ranks = torch.zeros(image_to_text_scores.size(0))
for index, score in enumerate(image_to_text_scores):
inds = torch.flip(torch.argsort(score), dims=[0])
rank = 1e10
# each image has multiple text mappings
        # check retrieved inds with each ground truth mapping i
for i in image_to_text_mapping[index]:
tmp = torch.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
# Compute metrics
tr1 = 100.0 * len(torch.where(ranks < 1)[0]) / len(ranks)
tr5 = 100.0 * len(torch.where(ranks < 5)[0]) / len(ranks)
tr10 = 100.0 * len(torch.where(ranks < 10)[0]) / len(ranks)
# Text to Images
ranks = torch.zeros(text_to_image_scores.size(0))
for index, score in enumerate(text_to_image_scores):
inds = torch.flip(torch.argsort(score), dims=[0])
ranks[index] = torch.where(inds == text_to_image_mapping[index])[0][0]
# Compute metrics
ir1 = 100.0 * len(torch.where(ranks < 1)[0]) / len(ranks)
ir5 = 100.0 * len(torch.where(ranks < 5)[0]) / len(ranks)
ir10 = 100.0 * len(torch.where(ranks < 10)[0]) / len(ranks)
tr_mean = (tr1 + tr5 + tr10) / 3
ir_mean = (ir1 + ir5 + ir10) / 3
r_mean = (tr_mean + ir_mean) / 2
eval_result = {
"txt_r1": tr1,
"txt_r5": tr5,
"txt_r10": tr10,
"txt_r_mean": tr_mean,
"img_r1": ir1,
"img_r5": ir5,
"img_r10": ir10,
"img_r_mean": ir_mean,
"r_mean": r_mean,
}
return eval_result
@torch.no_grad()
def format_output(
image_to_text_scores,
text_to_image_scores,
image_dataset,
text_dataset,
):
image_to_text_output = {}
for index, score in enumerate(image_to_text_scores):
image = image_dataset.images[index]
top10_ids = torch.flip(torch.argsort(score), dims=[0])[:10]
top10_text = [text_dataset.text[i] for i in top10_ids]
image_to_text_output[index] = {
"image": image,
"output": top10_text,
}
text_to_image_output = {}
for index, score in enumerate(text_to_image_scores):
text = text_dataset.text[index]
top10_ids = torch.flip(torch.argsort(score), dims=[0])[:10]
top10_images = [image_dataset.images[i] for i in top10_ids]
text_to_image_output[index] = {
"text": text,
"output": top10_images,
}
return image_to_text_output, text_to_image_output
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--config", default="./examples/albef/configs/retrieval.yaml")
args = parser.parse_args()
config = yaml.load(open(args.config, "r"), Loader=yaml.Loader)
init_distributed_mode(config)
device = torch.device(config["device"])
seed = config["seed"] + get_rank()
torch.manual_seed(seed)
random.seed(seed)
cudnn.benchmark = True
datamodule = RetrievalDataModule(**config["datamodule_args"])
model = albef_model_for_retrieval(config, pretrained=True)
model = model.to(device)
if is_dist_avail_and_initialized():
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[config["gpu"]]
)
train(model, datamodule, config["training_args"], device)
image_to_text_scores, text_to_image_scores = evaluation(
model, datamodule, config["eval_args"], device
)
val_result = itm_eval(
image_to_text_scores,
text_to_image_scores,
datamodule.image_dataset.image_to_text,
datamodule.text_dataset.text_to_image,
)
image_to_text_output, text_to_image_output = format_output(
image_to_text_scores,
text_to_image_scores,
datamodule.image_dataset,
datamodule.text_dataset,
)
result = {
"image_to_text_output": image_to_text_output,
"text_to_image_output": text_to_image_output,
**val_result,
}
torch.save(result, config["output_path"])
if __name__ == "__main__":
main()
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/albef/finetune_retrieval.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import re
from typing import List, Tuple, Union
import torch
from torchtext.transforms import PadTransform, Sequential, ToTensor, Truncate
from torchvision import transforms
from transformers.models.bert.tokenization_bert import BertTokenizer
# mean and standard deviation from the ALBEF repo:
# https://github.com/salesforce/ALBEF/blob/main/dataset/__init__.py#L16
MEAN = (0.48145466, 0.4578275, 0.40821073)
STD_DEV = (0.26862954, 0.26130258, 0.27577711)
class ALBEFTextTransform:
"""
    Remove punctuation and trailing spaces from the input text and transform it
    into a Tensor of token ids using BertTokenizer.
Args:
pretrained_tokenizer (str): Pretrained tokenizer to use.
Default: "bert-base-uncased"
do_pre_process (bool): Whether to pre-process input text.
Defaults to True.
        truncate (bool): Whether to truncate input text to max_seq_len.
            Defaults to False.
        pad_to_max_seq_len (bool): Whether to pad the sequence to max_seq_len.
            Defaults to False.
add_end_token (bool): Whether to add the end-of-sentence token.
Defaults to True.
max_seq_len (int): The max sequence length after truncating or padding.
Defaults to 25.
cls_token_id (int): Value to represent the start of each text.
Defaults to 101, Hugging Face's BERT cls token id.
sep_token_id (int): Value to represent the end of each text.
Defaults to 102, Hugging Face's BERT sep token id.
pad_token_id (int): Value with which to pad each text so that all texts are the same length.
Defaults to 0, Hugging Face's BERT pad token id.
Inputs:
text (Union[List[str], str]): Input text to transform.
"""
def __init__(
self,
pretrained_tokenizer: str = "bert-base-uncased",
do_pre_process: bool = True,
truncate: bool = False,
pad_to_max_seq_len: bool = False,
add_end_token: bool = True,
max_seq_len: int = 25,
cls_token_id: int = 101,
sep_token_id: int = 102,
pad_token_id: int = 0,
):
self.do_pre_process = do_pre_process
self.cls_token_id = cls_token_id
self.sep_token_id = sep_token_id
self.pad_token_id = pad_token_id
self.add_end_token = add_end_token
self.tokenizer = BertTokenizer.from_pretrained(pretrained_tokenizer)
self.transform = Sequential(
Truncate(max_seq_len=max_seq_len) if truncate else torch.nn.Identity(),
ToTensor(padding_value=self.pad_token_id),
PadTransform(max_length=max_seq_len, pad_value=self.pad_token_id)
if pad_to_max_seq_len
else torch.nn.Identity(),
)
def pre_process(self, text: str) -> str:
text = (
re.sub(
r"([,.'!?\"()*#:;~])",
"",
text,
)
.replace("-", " ")
.replace("/", " ")
)
text = text.rstrip(" ")
return text
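    # Example: pre_process("Where's the re-check/scan?") -> "Wheres the re check scan"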
def __call__(self, text: Union[List[str], str]) -> torch.Tensor:
if self.do_pre_process:
if isinstance(text, str):
text = self.pre_process(text)
else:
text = [self.pre_process(t) for t in text]
tokens = self.tokenizer(text)["input_ids"]
        # Strip the trailing [SEP] token when end tokens are not wanted. Note that
        # this check inspects the last element of ``tokens``, so it only takes
        # effect for a single tokenized sequence (a flat list of ids).
        if not self.add_end_token and tokens[-1] == self.sep_token_id:
            tokens = tokens[:-1]
input_ids = self.transform(tokens)
return input_ids
def training_image_transform(
image_size: int = 384,
scale: Tuple[float, float] = (0.5, 1.0),
image_interpolation=transforms.InterpolationMode.BICUBIC,
mean: Tuple[float, float, float] = MEAN,
std_dev: Tuple[float, float, float] = STD_DEV,
) -> transforms.Compose:
return transforms.Compose(
[
transforms.RandomResizedCrop(
image_size, scale=scale, interpolation=image_interpolation
),
transforms.RandomHorizontalFlip(),
transforms.RandAugment(2, 7),
transforms.ToTensor(),
transforms.Normalize(mean, std_dev),
]
)
def testing_image_transform(
image_size: int = 384,
image_interpolation=transforms.InterpolationMode.BICUBIC,
mean: Tuple[float, float, float] = MEAN,
std_dev: Tuple[float, float, float] = STD_DEV,
) -> transforms.Compose:
return transforms.Compose(
[
transforms.Resize(
(image_size, image_size), interpolation=image_interpolation
),
transforms.ToTensor(),
transforms.Normalize(mean, std_dev),
]
)
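# Minimal sketch of the token-level pipeline used inside ALBEFTextTransform,
# applied to hand-written (hypothetical) token ids instead of real
# BertTokenizer output.
if __name__ == "__main__":
    toy_pipeline = Sequential(
        Truncate(max_seq_len=5),
        ToTensor(padding_value=0),
        PadTransform(max_length=5, pad_value=0),
    )
    toy_token_ids = [[101, 7592, 2088, 102], [101, 7592, 102]]
    print(toy_pipeline(toy_token_ids))
    # tensor([[ 101, 7592, 2088,  102,    0],
    #         [ 101, 7592,  102,    0,    0]])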
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/albef/data/transforms.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Tuple
import torch
from data.transforms import (
ALBEFTextTransform,
testing_image_transform,
training_image_transform,
)
from data.vqa_dataset import VQADataset
from pytorch_lightning import LightningDataModule
from torch import Tensor
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, DistributedSampler
class VQADataModule(LightningDataModule):
"""
    The data module for the Visual Question Answering task.
Args:
train_files (List[str]): The paths to training json files.
test_files (List[str]): The paths to testing json files.
answer_list (str): The path to the answers list.
vqa_root (str): The path to vqa data directory.
vg_root (str): The path to vg data directory.
batch_size (int): The sampling batch size.
num_workers (int): The number of workers for the distributed mode.
"""
def __init__(
self,
train_files: List[str],
test_files: List[str],
answer_list: str,
vqa_root: str,
vg_root: str,
batch_size: int,
num_workers: int,
) -> None:
super().__init__()
self.train_dataset = VQADataset(
train_files,
vqa_root,
vg_root,
image_transform=training_image_transform(),
question_transform=ALBEFTextTransform(
truncate=True, max_seq_len=25, add_end_token=False
),
answer_transform=ALBEFTextTransform(do_pre_process=False),
split="train",
)
self.test_dataset = VQADataset(
test_files,
vqa_root,
vg_root,
image_transform=testing_image_transform(),
question_transform=ALBEFTextTransform(add_end_token=False),
answer_transform=ALBEFTextTransform(do_pre_process=False),
split="test",
answer_list=answer_list,
)
self.batch_size = batch_size
self.num_workers = num_workers
def _get_sampler(
self,
dataset: VQADataset,
shuffle: bool,
is_distributed: bool,
num_tasks: int,
global_rank: int,
) -> Optional[DistributedSampler]:
if not is_distributed:
return None
return DistributedSampler(
dataset, num_replicas=num_tasks, rank=global_rank, shuffle=shuffle
)
def train_dataloader(
self,
is_distributed: bool = False,
num_tasks: int = 0,
global_rank: int = 0,
drop_last: bool = True,
) -> DataLoader:
"""
DataLoader Outputs:
images (Tensor): Tensor of shape (B, C, W, H) of image inputs.
questions (Tensor): Tensor of shape (B, L) of question inputs.
question_atts (Tensor): Tensor of shape (B, L) of question attention mask.
answers (Tensor): Tensor of shape (N, M) of answer inputs.
N >= B because a vqa sample can have multiple answers.
answer_atts (Tensor): Tensor of shape (N, M) of answer attention mask.
weights (Tensor): Tensor of shape (N) of answer weights.
ans_lengths (List[int]): List of length B and sum N where
ans_lengths[i] = number of answers for images[i] and questions[i].
"""
sampler = self._get_sampler(
dataset=self.train_dataset,
shuffle=True,
is_distributed=is_distributed,
num_tasks=num_tasks,
global_rank=global_rank,
)
shuffle = sampler is None
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True,
sampler=sampler,
shuffle=shuffle,
collate_fn=vqa_train_collate_fn,
drop_last=drop_last,
)
def test_dataloader(
self,
is_distributed: bool = False,
num_tasks: int = 0,
global_rank: int = 0,
drop_last=False,
) -> DataLoader:
"""
DataLoader Outputs:
images (Tensor): Tensor of shape (B, C, W, H) of image inputs.
questions (Tensor): Tensor of shape (B, L) of question inputs.
question_atts (Tensor): Tensor of shape (B, L) of question attention mask.
question_ids (List): List of length B of question ids.
"""
sampler = self._get_sampler(
dataset=self.test_dataset,
shuffle=False,
is_distributed=is_distributed,
num_tasks=num_tasks,
global_rank=global_rank,
)
return DataLoader(
self.test_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True,
sampler=sampler,
shuffle=False,
collate_fn=vqa_test_collate_fn,
drop_last=drop_last,
)
def vqa_train_collate_fn(
batch: List[Tuple[Tensor, Tensor, List[Tensor], List[float]]]
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, List[int]]:
image_list = []
question_list = []
answer_list = []
weight_list = []
ans_lengths = []
for image, question, answer, weights in batch:
image_list.append(image)
question_list.append(question)
answer_list += answer
weight_list += weights
ans_lengths.append(len(answer))
images = torch.stack(image_list, dim=0)
questions = pad_sequence(question_list, batch_first=True)
question_atts = (questions != 0).type(torch.long)
answers = pad_sequence(answer_list, batch_first=True)
answer_atts = (answers != 0).type(torch.long)
weights = torch.Tensor(weight_list)
return (
images,
questions,
question_atts,
answers,
answer_atts,
weights,
ans_lengths,
)
def vqa_test_collate_fn(
batch: List[Tuple[Tensor, Tensor, int]]
) -> Tuple[Tensor, Tensor, Tensor, List[int]]:
image_list, question_list, question_ids = [], [], []
for image, question, question_id in batch:
image_list.append(image)
question_list.append(question)
question_ids.append(question_id)
images = torch.stack(image_list, dim=0)
questions = pad_sequence(question_list, batch_first=True)
question_atts = (questions != 0).type(torch.long)
return (
images,
questions,
question_atts,
question_ids,
)
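# Minimal sketch exercising vqa_train_collate_fn on a toy batch of two
# fabricated samples; the tensors and token ids below are made up, and running
# this block assumes the module's own imports resolve.
if __name__ == "__main__":
    toy_batch = [
        (
            torch.zeros(3, 4, 4),  # image (C, W, H)
            torch.tensor([101, 2054, 102]),  # question token ids
            [torch.tensor([101, 3899, 102])],  # one answer
            [1.0],  # answer weights
        ),
        (
            torch.zeros(3, 4, 4),
            torch.tensor([101, 2073, 2003, 102]),
            [torch.tensor([101, 2452, 102]), torch.tensor([101, 2314, 102])],
            [0.5, 0.5],
        ),
    ]
    images, questions, _, answers, _, weights, ans_lengths = vqa_train_collate_fn(
        toy_batch
    )
    print(images.shape, questions.shape, answers.shape, ans_lengths)
    # torch.Size([2, 3, 4, 4]) torch.Size([2, 4]) torch.Size([3, 3]) [1, 2]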
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/albef/data/vqa_datamodules.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/albef/data/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
from typing import Callable, List, Tuple, Union
from PIL import Image
from torch import Tensor
from torch.utils.data import Dataset
class RetrievalTrainingDataset(Dataset):
"""
    Create the training dataset for the Retrieval task.
Args:
ann_file (List[str]): The paths to training annotation json files.
image_root (str): The path to image data directory.
image_transform (Callable[[Image.Image], Tensor]): Image data transform.
text_transform (Callable[[Union[List[str], str]], Tensor]): Text data transform.
Dataset Outputs:
image (Tensor): Transformed image input tensor of shape (C, H, W).
caption (Tensor): Transformed text token input ids.
idx (int): The unique identifier for the image.
"""
def __init__(
self,
ann_file: List[str],
image_root: str,
image_transform: Callable[[Image.Image], Tensor],
text_transform: Callable[[Union[List[str], str]], Tensor],
) -> None:
self.ann = []
for f in ann_file:
self.ann += json.load(open(f, "r"))
self.image_root = image_root
self.image_transform = image_transform
self.text_transform = text_transform
self.idx = {} # map str image_id from dataset to int ids
i = 0
for ann in self.ann:
image_id = ann["image_id"]
if image_id not in self.idx.keys():
self.idx[image_id] = i
i += 1
def __len__(self) -> int:
return len(self.ann)
def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, int]:
ann = self.ann[index]
image_path = os.path.join(self.image_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.image_transform(image)
caption = self.text_transform(ann["caption"])
return image, caption, self.idx[ann["image_id"]]
class ImageToTextRetrievalDataset(Dataset):
"""
    Create the dataset for the Image-to-Text Retrieval task.
Args:
ann_file (List[str]): The paths to annotation json files.
image_root (str): The path to image data directory.
image_transform (Callable[[Image.Image], Tensor]): Image data transform.
Dataset Outputs:
image (Tensor): Transformed image input tensor of shape (C, H, W).
"""
def __init__(
self,
ann_file: List[str],
image_root: str,
image_transform: Callable[[Image.Image], Tensor],
) -> None:
self.image_root = image_root
self.image_transform = image_transform
self.ann = []
self.images = [] # paths to all images in the dataset
self.image_to_text = {} # map image ids to text ids for evaluation
for f in ann_file:
self.ann += json.load(open(f, "r"))
text_id = 0
for image_id, ann in enumerate(self.ann):
self.images.append(ann["image"])
num_text = len(ann["caption"])
self.image_to_text[image_id] = list(range(text_id, text_id + num_text))
text_id += num_text
def __len__(self) -> int:
return len(self.images)
def __getitem__(self, index: int) -> Tensor:
image_path = os.path.join(self.image_root, self.images[index])
image = Image.open(image_path).convert("RGB")
image = self.image_transform(image)
return image
class TextToImageRetrievalDataset(Dataset):
"""
    Create the dataset for the Text-to-Image Retrieval task.
Args:
ann_file (List[str]): The paths to annotation json files.
text_transform (Callable[[Union[List[str], str]], Tensor]): Text data transform.
Dataset Outputs:
text (Tensor): Transformed text token input ids.
"""
def __init__(
self,
ann_file: List[str],
text_transform: Callable[[Union[List[str], str]], Tensor],
) -> None:
self.text_transform = text_transform
self.ann = []
self.text = [] # all text strings in the dataset
self.text_to_image = {} # map text ids to image ids for evaluation
for f in ann_file:
self.ann += json.load(open(f, "r"))
text_id = 0
for image_id, ann in enumerate(self.ann):
for caption in ann["caption"]:
self.text.append(caption)
self.text_to_image[text_id] = image_id
text_id += 1
def __len__(self) -> int:
return len(self.text)
def __getitem__(self, index: int) -> Tensor:
text = self.text_transform(self.text[index])
return text
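# Minimal sketch of the id-mapping logic used by the two evaluation datasets
# above, run on an in-memory annotation list that follows the same (assumed)
# json schema.
if __name__ == "__main__":
    ann = [
        {"image": "images/a.jpg", "caption": ["a dog", "a brown dog"]},
        {"image": "images/b.jpg", "caption": ["a cat"]},
    ]
    image_to_text, text_to_image = {}, {}
    text_id = 0
    for image_id, entry in enumerate(ann):
        image_to_text[image_id] = list(
            range(text_id, text_id + len(entry["caption"]))
        )
        for _ in entry["caption"]:
            text_to_image[text_id] = image_id
            text_id += 1
    print(image_to_text)  # {0: [0, 1], 1: [2]}
    print(text_to_image)  # {0: 0, 1: 0, 2: 1}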
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/albef/data/retrieval_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Tuple
import torch
from data.retrieval_dataset import (
ImageToTextRetrievalDataset,
RetrievalTrainingDataset,
TextToImageRetrievalDataset,
)
from data.transforms import (
ALBEFTextTransform,
testing_image_transform,
training_image_transform,
)
from pytorch_lightning import LightningDataModule
from torch import Tensor
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, DistributedSampler
class RetrievalDataModule(LightningDataModule):
"""
    The data module for the Retrieval task.
Args:
train_files (List[str]): The paths to training json files.
test_files (List[str]): The paths to testing json files.
image_root (str): The path to image data directory.
batch_size (int): The sampling batch size.
num_workers (int): The number of workers for the distributed mode.
"""
def __init__(
self,
train_files: List[str],
test_files: List[str],
image_root: str,
batch_size: int,
num_workers: int,
) -> None:
super().__init__()
self.train_dataset = RetrievalTrainingDataset(
train_files,
image_root,
training_image_transform(),
ALBEFTextTransform(truncate=True, max_seq_len=30, add_end_token=False),
)
self.image_dataset = ImageToTextRetrievalDataset(
test_files,
image_root,
testing_image_transform(),
)
self.text_dataset = TextToImageRetrievalDataset(
test_files,
ALBEFTextTransform(
truncate=True,
pad_to_max_seq_len=True,
max_seq_len=30,
add_end_token=False,
),
)
self.batch_size = batch_size
self.num_workers = num_workers
def _get_sampler(
self,
dataset: Dataset,
shuffle: bool,
is_distributed: bool,
num_tasks: int,
global_rank: int,
) -> Optional[DistributedSampler]:
        # Do not return a sampler when not running in distributed mode;
        # the DataLoader then falls back to its default sampler.
if not is_distributed:
return None
return DistributedSampler(
dataset, num_replicas=num_tasks, rank=global_rank, shuffle=shuffle
)
def train_dataloader(
self,
is_distributed: bool = False,
num_tasks: int = 0,
global_rank: int = 0,
drop_last: bool = True,
) -> DataLoader:
"""
DataLoader Outputs:
images (Tensor): Tensor of shape (B, C, W, H) of image inputs.
text (Tensor): Tensor of shape (B, L) of text inputs.
text_atts (Tensor): Tensor of shape (B, L) of text attention mask.
idx (Tensor): Tensor of shape (B) of image identifiers.
"""
sampler = self._get_sampler(
dataset=self.train_dataset,
shuffle=True,
is_distributed=is_distributed,
num_tasks=num_tasks,
global_rank=global_rank,
)
shuffle = sampler is None
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True,
sampler=sampler,
shuffle=shuffle,
collate_fn=retrieval_train_collate_fn,
drop_last=drop_last,
)
def image_dataloader(
self,
drop_last: bool = False,
) -> DataLoader:
"""
DataLoader Outputs:
images (Tensor): Tensor of shape (B, C, W, H) of image inputs.
"""
return DataLoader(
self.image_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True,
sampler=None,
shuffle=False,
collate_fn=None,
drop_last=drop_last,
)
def text_dataloader(
self,
drop_last: bool = False,
) -> DataLoader:
"""
DataLoader Outputs:
text (Tensor): Tensor of shape (B, L) of text inputs.
text_atts (Tensor): Tensor of shape (B, L) of text attention mask.
"""
return DataLoader(
self.text_dataset,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True,
sampler=None,
shuffle=False,
collate_fn=text_collate_fn,
drop_last=drop_last,
)
def retrieval_train_collate_fn(
batch: List[Tuple[Tensor, Tensor, int]]
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
image_list = []
text_list = []
idx_list = []
for image, text, idx in batch:
image_list.append(image)
text_list.append(text)
idx_list.append(idx)
images = torch.stack(image_list, dim=0)
text = pad_sequence(text_list, batch_first=True)
text_atts = (text != 0).type(torch.long)
idx = Tensor(idx_list).type(torch.long)
return (
images,
text,
text_atts,
idx,
)
def text_collate_fn(batch: List[Tensor]) -> Tuple[Tensor, Tensor]:
text = pad_sequence(batch, batch_first=True)
text_atts = (text != 0).type(torch.long)
return text, text_atts
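# Minimal sketch of the padding and attention-mask convention in
# text_collate_fn, applied to two fabricated token sequences (the ids are
# hypothetical); running this block assumes the module's own imports resolve.
if __name__ == "__main__":
    toy_text = [torch.tensor([101, 2054, 102]), torch.tensor([101, 102])]
    text, text_atts = text_collate_fn(toy_text)
    print(text)  # tensor([[ 101, 2054,  102], [ 101,  102,    0]])
    print(text_atts)  # tensor([[1, 1, 1], [1, 1, 0]])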
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/albef/data/retrieval_datamodule.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
from typing import Callable, List, Optional, Tuple, Union
import torch
from PIL import Image
from torch import Tensor
from torch.utils.data import Dataset
class VQADataset(Dataset):
"""
    Create the dataset for the VQA task.
Args:
ann_file (List[str]): The paths to annotation json files.
vqa_root (str): The path to vqa data directory.
vg_root (str): The path to vg data directory.
image_transform (Callable[[Image.Image], Tensor]): image data transform.
question_transform (Callable[[Union[List[str], str]], Tensor]): text data transform for questions.
answer_transform (Callable[[Union[List[str], str]], Tensor]): text data transform for answers.
split (str): Indicates train or test. Default is train.
answer_list (str): The path to the answers list. Required for test split.
Dataset Outputs:
if split is train:
image (Tensor): Transformed image input tensor of shape (C, W, H).
question (Tensor): Transformed question token input ids.
answers (List[Tensor]): List of transformed answers token input ids.
answer_weights (List[float]): List of answer weights.
                answer_weights[i] is proportional to the number of occurrences of answers[i]
if split is test:
image (Tensor): Transformed image input tensor of shape (C, W, H).
question (Tensor): Transformed text token input ids.
question_id (int): The question sample id.
"""
def __init__(
self,
ann_file: List[str],
vqa_root: str,
vg_root: str,
image_transform: Callable[[Image.Image], Tensor],
question_transform: Callable[[Union[List[str], str]], Tensor],
answer_transform: Callable[[Union[List[str], str]], Tensor],
split: str = "train",
        answer_list: Optional[str] = None,
) -> None:
self.ann = []
for f in ann_file:
self.ann += json.load(open(f, "r"))
self.vqa_root = vqa_root
self.vg_root = vg_root
self.image_transform = image_transform
self.question_transform = question_transform
self.answer_transform = answer_transform
self.split = split
if split == "test":
self.answer_list = json.load(open(answer_list, "r"))
self.answer_input_ids = self.answer_transform(self.answer_list)
self.answer_attention_mask = (self.answer_input_ids != 0).type(torch.long)
def __len__(self) -> int:
return len(self.ann)
def __getitem__(
self, index: int
) -> Union[
Tuple[Tensor, Tensor, int], Tuple[Tensor, Tensor, List[Tensor], List[float]]
]:
ann = self.ann[index]
image_root = self.vqa_root if ann["dataset"] == "vqa" else self.vg_root
image_path = os.path.join(image_root, ann["image"])
image = Image.open(image_path).convert("RGB")
image = self.image_transform(image)
question = self.question_transform(ann["question"])
if self.split == "test":
return image, question, ann["question_id"]
elif self.split == "train":
if ann["dataset"] == "vqa":
# Each VQA sample question has a list of answers (with potential repeats)
# answer_weight[answer] = count(answer) / len(answers for the question)
answer_weights = {}
for answer in ann["answer"]:
if answer in answer_weights.keys():
answer_weights[answer] += 1 / len(ann["answer"])
else:
answer_weights[answer] = 1 / len(ann["answer"])
answers = list(answer_weights.keys())
answer_weights = list(answer_weights.values())
elif ann["dataset"] == "vg":
# A VG sample question has one answer so assign it a constant weight (0.5)
answers = [ann["answer"]]
answer_weights = [0.5]
answers = list(self.answer_transform(answers))
return image, question, answers, answer_weights
else:
raise ValueError("dataset split should be train or test")
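# Minimal sketch of the per-answer weighting scheme used in __getitem__ above,
# run on a fabricated VQA-style answer list.
if __name__ == "__main__":
    raw_answers = ["cat", "cat", "dog"]
    toy_weights = {}
    for answer in raw_answers:
        toy_weights[answer] = toy_weights.get(answer, 0.0) + 1 / len(raw_answers)
    print(toy_weights)  # {'cat': 0.666..., 'dog': 0.333...}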
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/albef/data/vqa_dataset.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Tuple
import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment
from torch import nn, Tensor
from torchvision.ops.boxes import box_convert, generalized_box_iou
class HungarianMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network.
For efficiency reasons, the targets don't include the no_object. Because of this,
in general, there are more predictions than targets. In this case, we do a 1-to-1
    matching of the best predictions, while the others are unmatched (and thus treated
as non-objects). This implementation is based on the MDETR repo:
https://github.com/ashkamath/mdetr/blob/main/models/matcher.py#L13
Attributes:
cost_class (float): Relative weight of the classification error in the
matching cost. Default: ``1``
        cost_bbox (float): Relative weight of the L1 error of the bounding box
            coordinates in the matching cost. Default: ``5``
        cost_giou (float): Relative weight of the giou loss of the bounding box in
            the matching cost. Default: ``2``
Args:
pred_logits (Tensor): Classification logits.
Size: (batch_size, num_queries, num_classes)
pred_boxes (Tensor): Predicted box coordinates.
Size: (batch_size, num_queries, 4)
target_boxes_per_sample (List[Tensor]): A list of target bounding boxes.
Length = batch_size.
Each element is a tensor of size (n_boxes_for_sample, 4).
positive_map (Tensor): :math:`\text{positive_map}[i,j] = 1` when box i maps to class j.
Size: (total_boxes, num_classes) where total_boxes is the sum of
n_boxes_for_sample over every sample in the batch.
Returns:
A list of size batch_size, containing tuples of ``(index_i, index_j)`` where:
- ``index_i`` is the indices of the selected predictions (in order)
- ``index_j`` is the indices of the corresponding selected targets
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
Raises:
ValueError: If all costs are zero or first dim of target boxes and positive map
don't match or classification cost and bbox cost shapes don't match.
"""
def __init__(
self, cost_class: float = 1, cost_bbox: float = 5, cost_giou: float = 2
):
super().__init__()
self.cost_class = cost_class
self.cost_bbox = cost_bbox
self.cost_giou = cost_giou
if cost_class == 0 and cost_bbox == 0 and cost_giou == 0:
raise ValueError("At least one cost must be nonzero")
@torch.no_grad()
def forward(
self,
pred_logits: Tensor,
pred_boxes: Tensor,
target_boxes_per_sample: List[Tensor],
positive_map: Tensor,
) -> List[Tuple[Tensor, Tensor]]:
bs, num_queries = pred_logits.shape[:2]
target_boxes = torch.cat(target_boxes_per_sample)
# We flatten to compute the cost matrices in a batch
out_prob = F.softmax(
pred_logits.flatten(0, 1), dim=-1
) # [batch_size * num_queries, num_classes]
out_bbox = pred_boxes.flatten(0, 1) # [batch_size * num_queries, 4]
if target_boxes.size(0) != positive_map.size(0):
raise ValueError(
"Total of target boxes should match first dim of positive map"
)
# Compute the soft-cross entropy between the predicted token alignment
# and the ground truth one for each box
cost_class = -(out_prob.unsqueeze(1) * positive_map.unsqueeze(0)).sum(-1)
# Compute the L1 cost between boxes
cost_bbox = torch.cdist(out_bbox, target_boxes, p=1)
if cost_class.shape != cost_bbox.shape:
raise ValueError(
f"""
Classification and bounding box cost shapes do not match.
Classification cost shape: {cost_class.shape},
Bounding box cost shape: {cost_bbox.shape}
"""
)
        # Compute the giou cost between boxes
cost_giou = -generalized_box_iou(
box_convert(out_bbox, in_fmt="cxcywh", out_fmt="xyxy"),
box_convert(target_boxes, in_fmt="cxcywh", out_fmt="xyxy"),
)
# Final cost matrix
cost_matrix = (
self.cost_bbox * cost_bbox
+ self.cost_class * cost_class
+ self.cost_giou * cost_giou
)
cost_matrix = cost_matrix.view(bs, num_queries, -1).cpu()
sizes = [x.size(0) for x in target_boxes_per_sample]
indices = [
linear_sum_assignment(c[i])
for i, c in enumerate(cost_matrix.split(sizes, -1))
]
return [
(
torch.as_tensor(i, dtype=torch.int64),
torch.as_tensor(j, dtype=torch.int64),
)
for i, j in indices
]
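# Minimal sketch running the matcher on fabricated predictions for one image
# with two target boxes. Boxes are in normalized cxcywh format and the positive
# map marks which class token each target box aligns to; all values here are
# hypothetical.
if __name__ == "__main__":
    torch.manual_seed(0)
    matcher = HungarianMatcher(cost_class=1, cost_bbox=5, cost_giou=2)
    pred_logits = torch.randn(1, 3, 4)  # (batch_size, num_queries, num_classes)
    pred_boxes = torch.tensor(
        [[[0.50, 0.50, 0.20, 0.20], [0.20, 0.20, 0.10, 0.10], [0.80, 0.80, 0.30, 0.30]]]
    )  # (batch_size, num_queries, 4)
    target_boxes = [torch.tensor([[0.21, 0.20, 0.10, 0.10], [0.79, 0.80, 0.30, 0.30]])]
    positive_map = torch.tensor(
        [[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]
    )  # (total target boxes, num_classes)
    print(matcher(pred_logits, pred_boxes, target_boxes, positive_map))
    # With the bounding box cost dominating, queries 1 and 2 are matched to
    # targets 0 and 1 respectively.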
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/mdetr/matcher.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import datetime
import json
import math
import random
import sys
import time
from copy import deepcopy
from pathlib import Path
from typing import Dict, Iterable, Optional
import numpy as np
import torch
import utils.dist as dist
from data.datamodule import GQADataModule
from loss import build_mdetr_loss, build_weight_dict
from matcher import HungarianMatcher
from optimizer import adjust_learning_rate, build_optimizer, update_ema
from torchmultimodal.models.mdetr.model import mdetr_for_vqa
from utils.args_parse import get_args_parser
from utils.metrics import MetricLogger, SmoothedValue
from utils.misc import targets_to
from vqa_eval import evaluate
def train_one_epoch(
model: torch.nn.Module,
matcher: torch.nn.Module,
loss: torch.nn.Module,
data_loader: Iterable,
weight_dict: Dict[str, float],
optimizer: torch.optim.Optimizer,
device: torch.device,
epoch: int,
args,
max_norm: float = 0,
model_ema: Optional[torch.nn.Module] = None,
):
model.train()
metric_logger = MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))
metric_logger.add_meter(
"lr_backbone", SmoothedValue(window_size=1, fmt="{value:.6f}")
)
metric_logger.add_meter(
"lr_text_encoder", SmoothedValue(window_size=1, fmt="{value:.6f}")
)
header = "Epoch: [{}]".format(epoch)
print_freq = 10
num_training_steps = int(len(data_loader) * args.epochs)
for i, batch_dict in enumerate(
metric_logger.log_every(data_loader, print_freq, header)
):
curr_step = epoch * len(data_loader) + i
samples = [x.to(device) for x in batch_dict["samples"]]
targets = batch_dict["targets"]
text = [t["tokenized"].to(device) for t in targets]
tokenized = batch_dict["batch_encoding"]
targets = targets_to(targets, device)
target_boxes = [t["boxes"] for t in targets]
answers = {k: v.to(device) for k, v in batch_dict["answers"].items()}
answer_types = {
k: v.to(device) for k, v in batch_dict["answer_type_mask"].items()
}
positive_map = (
batch_dict["positive_map"].to(device)
if "positive_map" in batch_dict
else None
)
outputs = model(
samples,
text,
)
indices = matcher(
outputs.model_output.pred_logits,
outputs.model_output.pred_boxes,
target_boxes,
positive_map,
)
loss_dict = loss(
outputs.model_output.pred_logits,
outputs.model_output.pred_boxes,
targets,
positive_map,
indices,
outputs.contrastive_embeddings.query_embeddings,
outputs.contrastive_embeddings.token_embeddings,
tokenized,
outputs.vqa_preds,
answers,
answer_types,
weight_dict,
)
losses = sum(
loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict
)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = dist.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {
f"{k}_unscaled": v for k, v in loss_dict_reduced.items()
}
loss_dict_reduced_scaled = {
k: v * weight_dict[k]
for k, v in loss_dict_reduced.items()
if k in weight_dict
}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
adjust_learning_rate(
optimizer,
epoch,
curr_step,
num_training_steps=num_training_steps,
args=args,
)
if model_ema is not None:
update_ema(model, model_ema, args.ema_decay)
metric_logger.update(
loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled
)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
metric_logger.update(lr_backbone=optimizer.param_groups[1]["lr"])
metric_logger.update(lr_text_encoder=optimizer.param_groups[2]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
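# Worked example of the loss weighting above (hypothetical keys): with
# loss_dict = {"loss_ce": 2.0, "loss_bbox": 0.4} and
# weight_dict = {"loss_ce": 1.0, "loss_bbox": 5.0}, the backpropagated quantity
# is 1.0 * 2.0 + 5.0 * 0.4 = 4.0, while the "*_unscaled" entries logged by the
# metric logger keep the raw per-term values.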
def main(args):
# Init distributed mode
dist.init_distributed_mode(args)
# Update dataset specific configs
if args.dataset_config is not None:
# https://stackoverflow.com/a/16878364
d = vars(args)
with open(args.dataset_config, "r") as f:
cfg = json.load(f)
d.update(cfg)
device = torch.device(args.device)
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
# fix the seed for reproducibility
seed = args.seed + rank
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.use_deterministic_algorithms(True)
# Set up datamodule
datamodule = GQADataModule(args)
datamodule.setup("train")
datamodule.setup("val")
train_loaders = datamodule.train_dataloader()
val_loader = datamodule.val_dataloader()
# Build the model
model = mdetr_for_vqa()
matcher = HungarianMatcher(
args.matcher_cost_class, args.matcher_cost_bbox, args.matcher_cost_giou
)
loss = build_mdetr_loss(True, args.no_object_weight, args.temperature)
model.to(device)
# Loss weights
weight_dict = build_weight_dict(args, model.vqa_heads.keys())
model_ema = deepcopy(model) if args.ema else None
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu], find_unused_parameters=False
)
model_without_ddp = model.module
optimizer = build_optimizer(model_without_ddp, args)
if args.load:
print("loading from", args.load)
checkpoint = torch.load(args.load, map_location="cpu")
model_without_ddp.load_state_dict(checkpoint["model"], strict=False)
if args.ema:
model_ema = deepcopy(model_without_ddp)
    # Used for resuming training from a checkpoint, e.g. when a job times out or is preempted.
if args.resume:
if args.resume.startswith("https"):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location="cpu", check_hash=True
)
else:
checkpoint = torch.load(args.resume, map_location="cpu")
model_without_ddp.load_state_dict(checkpoint["model"])
if not args.eval and "optimizer" in checkpoint and "epoch" in checkpoint:
optimizer.load_state_dict(checkpoint["optimizer"])
args.start_epoch = checkpoint["epoch"] + 1
if args.ema:
if "model_ema" not in checkpoint:
print(
"WARNING: ema model not found in checkpoint, resetting to current model"
)
model_ema = deepcopy(model_without_ddp)
else:
model_ema.load_state_dict(checkpoint["model_ema"])
print("Start training")
start_time = time.time()
best_metric = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.epoch_chunks > 0:
train_loader = train_loaders[epoch % len(train_loaders)]
sampler_train = datamodule.samplers_train[epoch % len(train_loaders)]
print(
f"Starting epoch {epoch // len(train_loader)}, sub_epoch {epoch % len(train_loaders)}"
)
        else:
            train_loader = train_loaders
            # Single-loader case: take the sampler attached to the DataLoader,
            # which is assumed to be a DistributedSampler when args.distributed
            # is set, so that set_epoch below does not hit an undefined name.
            sampler_train = train_loader.sampler
            print(f"Starting epoch {epoch}")
        if args.distributed:
            sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(
model=model,
matcher=matcher,
loss=loss,
data_loader=train_loader,
weight_dict=weight_dict,
optimizer=optimizer,
device=device,
epoch=epoch,
args=args,
max_norm=args.clip_max_norm,
model_ema=model_ema,
)
if args.output_dir:
is_main_process = (
not torch.distributed.is_initialized()
) or torch.distributed.get_rank() == 0
output_dir = Path(args.output_dir)
checkpoint_paths = [output_dir / "checkpoint.pth"]
# extra checkpoint before LR drop and every 2 epochs
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 2 == 0:
checkpoint_paths.append(output_dir / f"checkpoint{epoch:04}.pth")
for checkpoint_path in checkpoint_paths:
if is_main_process:
torch.save(
{
"model": model_without_ddp.state_dict(),
"model_ema": model_ema.state_dict() if args.ema else None,
"optimizer": optimizer.state_dict(),
"epoch": epoch,
"args": args,
},
checkpoint_path,
)
if epoch % args.eval_skip == 0:
test_stats = {}
test_model = model_ema if model_ema is not None else model
curr_test_stats = evaluate(
model=test_model,
matcher=matcher,
loss=loss,
data_loader=val_loader,
device=device,
weight_dict=weight_dict,
)
test_stats.update(curr_test_stats)
else:
test_stats = {}
log_stats = {
**{f"train_{k}": v for k, v in train_stats.items()},
**{f"test_{k}": v for k, v in test_stats.items()},
"epoch": epoch,
}
if args.output_dir and is_main_process:
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
if epoch % args.eval_skip == 0:
metric = test_stats["answer_total_accuracy_unscaled"]
if args.output_dir and metric > best_metric:
best_metric = metric
checkpoint_paths = [output_dir / "BEST_checkpoint.pth"]
            # save a separate checkpoint for the best validation metric so far
for checkpoint_path in checkpoint_paths:
if is_main_process:
torch.save(
{
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"epoch": epoch,
"args": args,
},
checkpoint_path,
)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print("Training time {}".format(total_time_str))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"DETR training and evaluation script", parents=[get_args_parser()]
)
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/mdetr/vqa_finetune.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import random
from copy import deepcopy
from pathlib import Path
import numpy as np
import torch
import utils.dist as dist
from data.datamodule import FlickrDataModule
from data.flickr_eval import FlickrEvaluator
from data.postprocessors import PostProcessFlickr
from torchmultimodal.models.mdetr.model import mdetr_for_phrase_grounding
from utils.args_parse import get_args_parser
from utils.metrics import MetricLogger
from utils.misc import targets_to
@torch.no_grad()
def evaluate(
model,
postprocessor,
data_loader,
evaluator,
device,
):
model.eval()
metric_logger = MetricLogger(delimiter=" ")
header = "Test:"
for batch_dict in metric_logger.log_every(data_loader, 10, header):
samples = [x.to(device) for x in batch_dict["samples"]]
targets = batch_dict["targets"]
text = [t["tokenized"].to(device) for t in targets]
targets = targets_to(targets, device)
positive_map = (
batch_dict["positive_map"].to(device)
if "positive_map" in batch_dict
else None
)
outputs = model(samples, text)
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
flickr_res = []
image_ids = [t["original_img_id"] for t in targets]
sentence_ids = [t["sentence_id"] for t in targets]
phrases_per_sample = [t["nb_eval"] for t in targets]
positive_map_eval = batch_dict["positive_map_eval"].to(device)
flickr_results = postprocessor(
outputs.pred_logits,
outputs.pred_boxes,
orig_target_sizes,
positive_map_eval,
phrases_per_sample,
)
assert len(flickr_results) == len(image_ids) == len(sentence_ids)
for im_id, sent_id, output in zip(image_ids, sentence_ids, flickr_results):
flickr_res.append(
{"image_id": im_id, "sentence_id": sent_id, "boxes": output}
)
evaluator.update(flickr_res)
# gather the stats from all processes
evaluator.synchronize_between_processes()
flickr_res = evaluator.summarize()
return flickr_res
def main(args):
# Init distributed mode
dist.init_distributed_mode(args)
# Update dataset specific configs
if args.dataset_config is not None:
# https://stackoverflow.com/a/16878364
d = vars(args)
with open(args.dataset_config, "r") as f:
cfg = json.load(f)
d.update(cfg)
device = torch.device(args.device)
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
# fix the seed for reproducibility
seed = args.seed + rank
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.use_deterministic_algorithms(True, warn_only=True)
# Set up datamodule
datamodule = FlickrDataModule(args)
datamodule.setup("val")
val_loader = datamodule.val_dataloader()
# Build the model
model = mdetr_for_phrase_grounding(
args.num_queries,
args.num_classes,
)
model.to(device)
model_ema = deepcopy(model) if args.ema else None
model_without_ddp = model
# TODO: consolidate with other is_distributed logic
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu], find_unused_parameters=True
)
model_without_ddp = model.module
if args.resume.startswith("https"):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location="cpu", check_hash=True
)
else:
checkpoint = torch.load(args.resume, map_location="cpu")
model_without_ddp.load_state_dict(checkpoint["model"])
# Load EMA model
if "model_ema" not in checkpoint:
print("WARNING: ema model not found in checkpoint, resetting to current model")
model_ema = deepcopy(model_without_ddp)
else:
model_ema.load_state_dict(checkpoint["model_ema"])
# For eval we only need the model and not the contrastive projections
test_model = model_ema.model if model_ema is not None else model.model
# Construct evaluator
evaluator = FlickrEvaluator(
args.flickr_dataset_path,
subset="test" if args.test else "val",
merge_boxes=args.GT_type == "merged",
)
postprocessor = PostProcessFlickr()
test_stats = evaluate(
model=test_model,
postprocessor=postprocessor,
data_loader=val_loader,
evaluator=evaluator,
device=device,
)
print(test_stats)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"DETR training and evaluation script", parents=[get_args_parser()]
)
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
EXA-1-master
|
exa/libraries/multimodal-main/examples/mdetr/phrase_grounding.py
|