Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
lightly | lightly-master/tests/loss/test_VICRegLLoss.py | import unittest
from typing import List
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from lightly.loss import VICRegLLoss
class TestVICRegLLoss(unittest.TestCase):
    """Tests for ``lightly.loss.VICRegLLoss``.

    All tests use batch size 2; global views carry 7x7 dense feature maps and
    local views carry 4x4 dense feature maps.
    """

    def test_forward(self) -> None:
        # Smoke test: the combined loss over global and local views is positive.
        torch.manual_seed(0)
        criterion = VICRegLLoss()
        # Each view is a tuple of (global embedding, dense local feature map).
        global_view_features = [
            (torch.randn((2, 32)), torch.randn((2, 7, 7, 8))) for _ in range(2)
        ]
        global_view_grids = [torch.randn((2, 7, 7, 2)) for _ in range(2)]
        local_view_features = [
            (torch.randn((2, 32)), torch.randn((2, 4, 4, 8))) for _ in range(6)
        ]
        local_view_grids = [torch.randn((2, 4, 4, 2)) for _ in range(6)]
        loss = criterion.forward(
            global_view_features=global_view_features,
            global_view_grids=global_view_grids,
            local_view_features=local_view_features,
            local_view_grids=local_view_grids,
        )
        assert loss > 0

    @unittest.skipUnless(torch.cuda.is_available(), "Cuda not available")
    def test_forward__cuda(self) -> None:
        # Same smoke test as test_forward but with all tensors on the GPU.
        torch.manual_seed(0)
        criterion = VICRegLLoss()
        global_view_features = [
            (torch.randn((2, 32)).cuda(), torch.randn((2, 7, 7, 8)).cuda())
            for _ in range(2)
        ]
        global_view_grids = [torch.randn((2, 7, 7, 2)).cuda() for _ in range(2)]
        local_view_features = [
            (torch.randn((2, 32)).cuda(), torch.randn((2, 4, 4, 8)).cuda())
            for _ in range(6)
        ]
        local_view_grids = [torch.randn((2, 4, 4, 2)).cuda() for _ in range(6)]
        loss = criterion.forward(
            global_view_features=global_view_features,
            global_view_grids=global_view_grids,
            local_view_features=local_view_features,
            local_view_grids=local_view_grids,
        )
        assert loss > 0

    def test_forward__error_global_view_features_and_grids_not_same_length(
        self,
    ) -> None:
        # Mismatched list lengths for the global views must raise a ValueError.
        criterion = VICRegLLoss()
        global_view_features = [
            (torch.randn((2, 32)), torch.randn((2, 7, 7, 8))) for _ in range(2)
        ]
        global_view_grids = [torch.randn((2, 7, 7, 2)) for _ in range(1)]
        error_msg = (
            "global_view_features and global_view_grids must have same length but "
            "found 2 and 1."
        )
        with self.assertRaisesRegex(ValueError, error_msg):
            criterion.forward(
                global_view_features=global_view_features,
                global_view_grids=global_view_grids,
            )

    def test_forward__error_local_view_features_and_grids_not_same_length(self) -> None:
        # Mismatched list lengths for the local views must raise a ValueError.
        criterion = VICRegLLoss()
        local_view_features = [
            (torch.randn((2, 32)), torch.randn((2, 4, 4, 8))) for _ in range(2)
        ]
        local_view_grids = [torch.randn((2, 4, 4, 2)) for _ in range(1)]
        error_msg = (
            "local_view_features and local_view_grids must have same length but found "
            "2 and 1."
        )
        with self.assertRaisesRegex(ValueError, error_msg):
            criterion.forward(
                global_view_features=[],
                global_view_grids=[],
                local_view_features=local_view_features,
                local_view_grids=local_view_grids,
            )

    def test_forward__error_local_view_features_and_grids_must_both_be_set(
        self,
    ) -> None:
        # Passing only one of local_view_features / local_view_grids (in either
        # order) must raise a ValueError naming the offending types.
        criterion = VICRegLLoss()
        local_view_features = [
            (torch.randn((2, 32)), torch.randn((2, 4, 4, 8))) for _ in range(2)
        ]
        local_view_grids = [torch.randn((2, 4, 4, 2)) for _ in range(2)]
        error_msg = (
            "local_view_features and local_view_grids must either both be set or None "
            "but found <class 'list'> and <class 'NoneType'>."
        )
        with self.assertRaisesRegex(ValueError, error_msg):
            criterion.forward(
                global_view_features=[],
                global_view_grids=[],
                local_view_features=local_view_features,
                local_view_grids=None,
            )
        error_msg = (
            "local_view_features and local_view_grids must either both be set or None "
            "but found <class 'NoneType'> and <class 'list'>."
        )
        with self.assertRaisesRegex(ValueError, error_msg):
            criterion.forward(
                global_view_features=[],
                global_view_grids=[],
                local_view_features=None,
                local_view_grids=local_view_grids,
            )

    def test_global_loss__compare(self) -> None:
        # Compare against original implementation.
        torch.manual_seed(0)
        criterion = VICRegLLoss()
        global_view_features = [
            (torch.randn((2, 32)), torch.randn((2, 7, 7, 8))) for _ in range(2)
        ]
        local_view_features = [
            (torch.randn((2, 32)), torch.randn((2, 4, 4, 8))) for _ in range(6)
        ]
        loss = criterion._global_loss(
            global_view_features=global_view_features,
            local_view_features=local_view_features,
        )
        # The global loss only uses the first entry (the embedding) of every
        # feature tuple; the dense maps are ignored here.
        embedding = [x for x, _ in global_view_features + local_view_features]
        expected_loss = _reference_global_loss(embedding=embedding)
        assert loss == expected_loss
# Note: We cannot compare our local loss implementation against the original code
# because the resulting values slightly differ. See VICRegLLoss._local_loss for
# details.
def _reference_global_loss(
    embedding: List[Tensor],
    inv_coeff: float = 25.0,
    var_coeff: float = 25.0,
    cov_coeff: float = 1.0,
) -> Tensor:
    """Reference implementation of the VICRegL global loss.

    Ported from the original VICRegL code:
    https://github.com/facebookresearch/VICRegL/blob/803ae4c8cd1649a820f03afb4793763e95317620/main_vicregl.py#L421
    """

    def _centered(t: Tensor) -> Tensor:
        # Subtract the per-dimension batch mean.
        return t - t.mean(dim=0)

    def _off_diagonal(mat: Tensor) -> Tensor:
        # Return all off-diagonal entries of a square matrix as a flat vector.
        rows, cols = mat.shape
        assert rows == cols
        return mat.flatten()[:-1].view(rows - 1, rows + 1)[:, 1:].flatten()

    n_views = len(embedding)

    # Invariance term: MSE between the first two views and every other view,
    # visited in ascending index order (matches the original np.delete loop).
    inv_loss = 0.0
    n_pairs = 0
    for anchor in range(2):
        for other in range(n_views):
            if other == anchor:
                continue
            inv_loss = inv_loss + F.mse_loss(embedding[anchor], embedding[other])
            n_pairs += 1
    inv_loss = inv_coeff * inv_loss / n_pairs

    # Variance and covariance terms, averaged over all views.
    var_loss = 0.0
    cov_loss = 0.0
    embedding_dim = embedding[0].shape[1]
    for view in embedding:
        centered = _centered(view)
        std = torch.sqrt(centered.var(dim=0) + 0.0001)
        var_loss = var_loss + torch.mean(torch.relu(1.0 - std))
        cov = (centered.T @ centered) / (centered.size(0) - 1)
        cov_loss = cov_loss + _off_diagonal(cov).pow(2).sum().div(embedding_dim)
    var_loss = var_coeff * var_loss / n_views
    cov_loss = cov_coeff * cov_loss / n_views

    return inv_loss + var_loss + cov_loss
| 6,906 | 36.134409 | 116 | py |
lightly | lightly-master/tests/loss/test_VICRegLoss.py | import unittest
import pytest
import torch
import torch.nn.functional as F
from pytest_mock import MockerFixture
from torch import Tensor
from torch import distributed as dist
from lightly.loss import VICRegLoss
class TestVICRegLoss:
    def test__gather_distributed(self, mocker: MockerFixture) -> None:
        """Constructing with gather_distributed=True queries dist.is_available."""
        patched_is_available = mocker.patch.object(
            dist, "is_available", return_value=True
        )
        VICRegLoss(gather_distributed=True)
        patched_is_available.assert_called_once()

    def test__gather_distributed_dist_not_available(
        self, mocker: MockerFixture
    ) -> None:
        """Without a distributed backend, gather_distributed=True must fail."""
        patched_is_available = mocker.patch.object(
            dist, "is_available", return_value=False
        )
        with pytest.raises(ValueError):
            VICRegLoss(gather_distributed=True)
        patched_is_available.assert_called_once()
class TestVICRegLossUnitTest(unittest.TestCase):
    # Old tests in unittest style, please add new tests to TestVICRegLoss using pytest.

    def test_forward_pass(self):
        """The loss is symmetric in its two inputs (CPU)."""
        criterion = VICRegLoss()
        for batch_size in (2, 3):
            view_a = torch.randn((batch_size, 32))
            view_b = torch.randn((batch_size, 32))
            # Swapping the arguments must not change the loss value.
            loss_ab = criterion(view_a, view_b)
            loss_ba = criterion(view_b, view_a)
            self.assertAlmostEqual((loss_ab - loss_ba).pow(2).item(), 0.0)

    @unittest.skipUnless(torch.cuda.is_available(), "Cuda not available")
    def test_forward_pass_cuda(self):
        """The loss is symmetric in its two inputs (GPU)."""
        criterion = VICRegLoss()
        for batch_size in (2, 3):
            view_a = torch.randn((batch_size, 32)).cuda()
            view_b = torch.randn((batch_size, 32)).cuda()
            loss_ab = criterion(view_a, view_b)
            loss_ba = criterion(view_b, view_a)
            self.assertAlmostEqual((loss_ab - loss_ba).pow(2).item(), 0.0)

    def test_forward_pass__error_batch_size_1(self):
        """A batch of size one is rejected."""
        criterion = VICRegLoss()
        view_a = torch.randn((1, 32))
        view_b = torch.randn((1, 32))
        with self.assertRaises(AssertionError):
            criterion(view_a, view_b)

    def test_forward_pass__error_different_shapes(self):
        """Inputs with mismatching feature dimensions are rejected."""
        criterion = VICRegLoss()
        view_a = torch.randn((2, 32))
        view_b = torch.randn((2, 16))
        with self.assertRaises(AssertionError):
            criterion(view_a, view_b)

    def test_forward__compare(self) -> None:
        """The loss matches the original VICReg reference implementation."""
        criterion = VICRegLoss()
        view_a = torch.randn((2, 32))
        view_b = torch.randn((2, 32))
        expected = _reference_vicreg_loss(view_a, view_b)
        assert criterion(view_a, view_b).item() == expected.item()

    def test_forward__compare_vicregl(self) -> None:
        # Compare against implementation in VICRegL.
        # Note: nu_param is set to 0.5 because our loss implementation follows the
        # original VICReg implementation and there is a slight difference between the
        # implementations in VICReg and VICRegL.
        criterion = VICRegLoss(nu_param=0.5)
        view_a = torch.randn((2, 10, 32))
        view_b = torch.randn((2, 10, 32))
        expected = _reference_vicregl_vicreg_loss(view_a, view_b)
        assert criterion(view_a, view_b).item() == expected.item()
def _reference_vicreg_loss(
    x: Tensor,
    y: Tensor,
    sim_coeff: float = 25.0,
    std_coeff: float = 25.0,
    cov_coeff: float = 1.0,
):
    """Reference VICReg loss from the original repository.

    https://github.com/facebookresearch/vicreg/blob/4e12602fd495af83efd1631fbe82523e6db092e0/main_vicreg.py#L194
    """

    def _off_diagonal(mat):
        # All off-diagonal entries of a square matrix, flattened.
        rows, cols = mat.shape
        assert rows == cols
        return mat.flatten()[:-1].view(rows - 1, rows + 1)[:, 1:].flatten()

    batch_size = x.shape[0]
    num_features = x.shape[-1]

    # Invariance term.
    repr_loss = F.mse_loss(x, y)

    # Center both representations along the batch dimension.
    x = x - x.mean(dim=0)
    y = y - y.mean(dim=0)

    # Variance term: hinge loss on the per-dimension standard deviation.
    std_x = torch.sqrt(x.var(dim=0) + 0.0001)
    std_y = torch.sqrt(y.var(dim=0) + 0.0001)
    std_loss = torch.mean(F.relu(1 - std_x)) / 2 + torch.mean(F.relu(1 - std_y)) / 2

    # Covariance term: penalize off-diagonal covariance entries.
    cov_x = (x.T @ x) / (batch_size - 1)
    cov_y = (y.T @ y) / (batch_size - 1)
    cov_loss = _off_diagonal(cov_x).pow(2).sum().div(num_features) + _off_diagonal(
        cov_y
    ).pow(2).sum().div(num_features)

    return sim_coeff * repr_loss + std_coeff * std_loss + cov_coeff * cov_loss
def _reference_vicregl_vicreg_loss(
    x: Tensor,
    y: Tensor,
    inv_coeff: float = 25.0,
    var_coeff: float = 25.0,
    cov_coeff: float = 1.0,
) -> Tensor:
    """VICReg loss as implemented inside the VICRegL repository.

    https://github.com/facebookresearch/VICRegL/blob/803ae4c8cd1649a820f03afb4793763e95317620/main_vicregl.py#L284
    """
    # Invariance term.
    invariance = inv_coeff * F.mse_loss(x, y)

    # Center along the batch dimension.
    x = x - x.mean(0)
    y = y - y.mean(0)

    # Variance term: hinge loss on the per-dimension standard deviation.
    std_x = torch.sqrt(x.var(dim=0) + 0.0001)
    std_y = torch.sqrt(y.var(dim=0) + 0.0001)
    variance = var_coeff * (
        torch.mean(F.relu(1.0 - std_x)) / 2 + torch.mean(F.relu(1.0 - std_y)) / 2
    )

    # Move the batch dimension to the middle so covariances are computed per
    # leading slice.
    x = x.permute((1, 0, 2))
    y = y.permute((1, 0, 2))
    *_, n_samples, n_channels = x.shape
    off_diag_mask = ~torch.eye(n_channels, device=x.device, dtype=torch.bool)
    # Center features along the sample dimension.
    x = x - x.mean(dim=-2, keepdim=True)
    y = y - y.mean(dim=-2, keepdim=True)
    cov_x = torch.einsum("...nc,...nd->...cd", x, x) / (n_samples - 1)
    cov_y = torch.einsum("...nc,...nd->...cd", y, y) / (n_samples - 1)
    cov_term_x = (cov_x[..., off_diag_mask].pow(2).sum(-1) / n_channels) / 2
    cov_term_y = (cov_y[..., off_diag_mask].pow(2).sum(-1) / n_channels) / 2
    covariance = cov_coeff * (cov_term_x + cov_term_y).mean()

    return invariance + variance + covariance
| 5,386 | 32.459627 | 116 | py |
lightly | lightly-master/tests/loss/test_barlow_twins_loss.py | import pytest
from pytest_mock import MockerFixture
from torch import distributed as dist
from lightly.loss.barlow_twins_loss import BarlowTwinsLoss
class TestBarlowTwinsLoss:
    def test__gather_distributed(self, mocker: MockerFixture) -> None:
        """Constructing with gather_distributed=True queries dist.is_available."""
        patched_is_available = mocker.patch.object(
            dist, "is_available", return_value=True
        )
        BarlowTwinsLoss(gather_distributed=True)
        patched_is_available.assert_called_once()

    def test__gather_distributed_dist_not_available(
        self, mocker: MockerFixture
    ) -> None:
        """Without a distributed backend, gather_distributed=True must fail."""
        patched_is_available = mocker.patch.object(
            dist, "is_available", return_value=False
        )
        with pytest.raises(ValueError):
            BarlowTwinsLoss(gather_distributed=True)
        patched_is_available.assert_called_once()
| 792 | 33.478261 | 88 | py |
lightly | lightly-master/tests/models/test_ModelUtils.py | import copy
import unittest
import torch
import torch.nn as nn
from lightly.models import utils
from lightly.models.utils import (
_no_grad_trunc_normal,
activate_requires_grad,
batch_shuffle,
batch_unshuffle,
deactivate_requires_grad,
nearest_neighbors,
normalize_weight,
update_momentum,
)
def has_grad(model: nn.Module) -> bool:
    """Helper method to check if a model has `requires_grad` set to True.

    Args:
        model: Module whose parameters are inspected.

    Returns:
        True if at least one parameter requires gradients, False otherwise
        (including for modules without any parameters).
    """
    # any() short-circuits on the first trainable parameter and replaces the
    # previous manual flag loop with its non-idiomatic `== True` comparison.
    return any(param.requires_grad for param in model.parameters())
class TestModelUtils(unittest.TestCase):
    """Tests for the tensor helpers in ``lightly.models.utils``.

    Leftover debug ``print`` calls in ``test_nearest_neighbors`` were removed;
    the prints in ``_assert_tensor_equal`` are intentional debugging aids and
    are kept.
    """

    def _assert_tensor_equal(self, x, y) -> None:
        # If the assertion fails then only an "assertion is not True" error is
        # shown without showing the contents of x and y. To help debugging, x
        # and y are printed. Note that the output is only shown if the assertion
        # fails.
        print(x)
        print(y)
        self.assertTrue(torch.equal(x, y))

    def test_batch_shuffle(self, seed=0) -> None:
        # Shuffling then unshuffling must restore the original batch order.
        torch.manual_seed(seed)
        x1 = torch.rand((4, 3, 64, 64))
        x1_shuffled, shuffle = batch_shuffle(x1)
        out1 = batch_unshuffle(x1_shuffled, shuffle)
        self.assertTrue(torch.equal(x1, out1))
        self.assertFalse(torch.equal(x1, x1_shuffled))

    def test_activate_requires_grad(self) -> None:
        # Gradients can be deactivated and re-activated for a whole module.
        model = nn.Sequential(
            nn.Linear(32, 32),
            nn.ReLU(),
        )
        self.assertTrue(has_grad(model))
        deactivate_requires_grad(model)
        self.assertFalse(has_grad(model))
        activate_requires_grad(model)
        self.assertTrue(has_grad(model))

    def test_momentum_works(self) -> None:
        # Smoke test: the momentum update runs without error.
        model = nn.Sequential(
            nn.Linear(32, 32),
            nn.ReLU(),
        )
        model_momentum = copy.deepcopy(model)
        update_momentum(model, model_momentum, 0.99)

    def test_normalize_weight_linear(self) -> None:
        # After normalization every column (resp. row) has unit norm, so the
        # sum of norms equals the number of columns (resp. rows).
        input_dim = 32
        output_dim = 64
        linear = nn.Linear(input_dim, output_dim, bias=False)
        normalize_weight(linear.weight, dim=0)
        self.assertEqual(linear.weight.norm(dim=0).sum(), input_dim)
        normalize_weight(linear.weight, dim=1)
        self.assertEqual(linear.weight.norm(dim=1).sum(), output_dim)

    def test_no_grad_trunc_normal(self, device="cpu", seed=0) -> None:
        # Sampled values must stay inside the truncation interval [a, b].
        torch.manual_seed(seed)
        tensor = torch.rand((8, 16)).to(device)
        a = -2
        b = 2
        _no_grad_trunc_normal(tensor, mean=0, std=1, a=-2, b=2)
        self.assertTrue(tensor.min() >= a)
        self.assertTrue(tensor.max() <= b)

    @unittest.skipUnless(torch.cuda.is_available(), "No cuda available")
    def test_no_grad_trunc_normal_cuda(self, seed=0) -> None:
        self.test_no_grad_trunc_normal(device="cuda")

    def test_repeat_token(self) -> None:
        # A single token is repeated to the requested (batch, length) size.
        token = torch.Tensor([[[1, 2, 3, 4]]])
        out = utils.repeat_token(token, size=(2, 3))
        self.assertEqual(tuple(out.shape), (2, 3, 4))
        self.assertListEqual(out[-1][-1].tolist(), [1, 2, 3, 4])

    def test_expand_index_like(self, seed=0) -> None:
        # The index gains a trailing dimension matching the token dimension.
        torch.manual_seed(seed)
        index = torch.Tensor(
            [
                [1, 0, 3],
                [1, 2, 4],
            ]
        ).long()
        tokens = torch.rand(2, 4, 5)
        expanded_index = utils.expand_index_like(index, tokens)
        self.assertEqual(tuple(expanded_index.shape), (2, 3, 5))

    def test_get_at_index(self, seed=0) -> None:
        torch.manual_seed(seed)
        index = torch.Tensor(
            [
                [1, 0, 3],
                [1, 2, 0],
            ]
        ).long()
        tokens = torch.rand(2, 4, 5)
        selected = utils.get_at_index(tokens, index)
        self.assertEqual(tuple(selected.shape), (2, 3, 5))
        # make sure that correct tokens were selected
        for i in range(index.shape[0]):
            for j in range(index.shape[1]):
                self._assert_tensor_equal(tokens[i, index[i, j]], selected[i, j])

    def test_set_at_index(self, seed=0) -> None:
        torch.manual_seed(seed)
        index = torch.Tensor(
            [
                [1, 0, 3],
                [1, 2, 0],
            ]
        ).long()
        tokens = torch.rand(2, 4, 5)
        values = torch.rand(2, 3, 5)
        new_tokens = utils.set_at_index(tokens, index, values)
        # make sure that values are copied correctly
        for i in range(index.shape[0]):
            for j in range(index.shape[1]):
                self._assert_tensor_equal(new_tokens[i, index[i, j]], values[i, j])

    def test_mask_at_index(self, seed=0) -> None:
        torch.manual_seed(seed)
        index = torch.Tensor(
            [
                [1, 0, 3],
                [1, 2, 0],
            ]
        ).long()
        tokens = torch.rand(2, 4, 5)
        mask_token = torch.rand(1, 1, 5)
        new_tokens = utils.mask_at_index(tokens.clone(), index.clone(), mask_token)
        # every masked position must contain the mask token
        for i in range(index.shape[0]):
            for j in range(index.shape[1]):
                self._assert_tensor_equal(new_tokens[i, index[i, j]], mask_token[0, 0])

    def test_prepend_class_token(self, seed=0) -> None:
        torch.manual_seed(seed)
        tokens = torch.rand(2, 3, 5)
        class_token = torch.rand(1, 1, 5)
        new_tokens = utils.prepend_class_token(tokens, class_token)
        self.assertListEqual(list(new_tokens.shape), [2, 4, 5])
        # make sure that class token is inserted in correct place
        for i in range(new_tokens.shape[0]):
            self._assert_tensor_equal(new_tokens[i][0], class_token[0, 0])

    def test_patchify(self, seed=0) -> None:
        torch.manual_seed(seed)
        batch_size, channels, height, width = (2, 3, 8, 8)
        patch_size = 4
        images = torch.rand(batch_size, channels, height, width)
        batch_patches = utils.patchify(images, patch_size)
        height_patches = height // patch_size
        width_patches = width // patch_size
        num_patches = height_patches * width_patches
        patch_dim = channels * patch_size**2
        self.assertListEqual(
            list(batch_patches.shape), [batch_size, num_patches, patch_dim]
        )
        # make sure that patches are correctly formed
        for image, img_patches in zip(images, batch_patches):
            for i in range(height_patches):
                for j in range(width_patches):
                    # extract patch from original image
                    expected_patch = image[
                        :,
                        i * patch_size : (i + 1) * patch_size,
                        j * patch_size : (j + 1) * patch_size,
                    ]
                    # permute and flatten to match order of patchified images
                    expected_patch = expected_patch.permute(1, 2, 0).flatten()
                    img_patch = img_patches[i * width_patches + j]
                    self._assert_tensor_equal(img_patch, expected_patch)

    def _test_random_token_mask(
        self, seed=0, mask_ratio=0.6, mask_class_token=False, device="cpu"
    ) -> None:
        torch.manual_seed(seed)
        batch_size, seq_length = 2, 5
        idx_keep, idx_mask = utils.random_token_mask(
            size=(batch_size, seq_length),
            mask_ratio=mask_ratio,
            mask_class_token=mask_class_token,
            device=device,
        )
        # concatenating and sorting the two index tensors should result in a tensor
        # with every index appearing exactly once
        idx, _ = torch.cat([idx_keep, idx_mask], dim=1).sort(dim=1)
        expected_idx = (
            torch.arange(seq_length).repeat(batch_size).reshape(batch_size, seq_length)
        )
        expected_idx = expected_idx.to(device)
        self._assert_tensor_equal(idx, expected_idx)
        if not mask_class_token:
            # class token should be first in index
            self.assertTrue(torch.all(idx_keep[:, 0] == 0))

    def _test_random_token_mask_parameters(self, device) -> None:
        # Exercise all combinations of mask_ratio and mask_class_token.
        for mask_ratio in [0, 0.6, 1.0]:
            for mask_class_token in [False, True]:
                self._test_random_token_mask(
                    mask_ratio=mask_ratio,
                    mask_class_token=mask_class_token,
                    device=device,
                )

    def test_random_token_mask(self) -> None:
        self._test_random_token_mask_parameters(device="cpu")

    def test_nearest_neighbors(self) -> None:
        # Test input with shape (batch_size, map_size_0, num_input_maps)
        input_maps = torch.tensor(
            [
                [[1, 4], [2, 5], [3, 6]],
                [[7, 10], [8, 11], [9, 12]],
                [[13, 16], [14, 17], [15, 18]],
            ]
        )
        # Test candidate maps with shape (batch_size, map_size_1, num_candidate_maps)
        candidate_maps = torch.tensor(
            [
                [[1, 1], [2, 2], [3, 3]],
                [[1, 1], [2, 2], [3, 3]],
                [[1, 1], [2, 2], [3, 3]],
            ]
        )
        # Test distances with shape (batch_size, map_size_0, map_size_1)
        distances = torch.tensor(
            [[[0, 1, 2], [1, 0, 3]], [[4, 3, 2], [3, 2, 1]], [[2, 3, 4], [3, 4, 5]]]
        )
        # Test num_matches = 2
        input_maps_filtered, candidate_maps_filtered = nearest_neighbors(
            input_maps, candidate_maps, distances, num_matches=2
        )
        assert input_maps_filtered.shape == (3, 2, 2)
        assert input_maps_filtered.equal(
            torch.tensor([[[1, 4], [2, 5]], [[8, 11], [7, 10]], [[13, 16], [14, 17]]])
        )
        assert candidate_maps_filtered.shape == (3, 2, 2)
        assert candidate_maps_filtered.equal(
            torch.tensor([[[1, 1], [2, 2]], [[3, 3], [3, 3]], [[1, 1], [1, 1]]])
        )
        # Test num_matches = 1
        input_maps_filtered, candidate_maps_filtered = nearest_neighbors(
            input_maps, candidate_maps, distances, num_matches=1
        )
        assert input_maps_filtered.shape == (3, 1, 2)
        assert input_maps_filtered.equal(
            torch.tensor([[[1, 4]], [[8, 11]], [[13, 16]]])
        )
        assert candidate_maps_filtered.shape == (3, 1, 2)
        assert candidate_maps_filtered.equal(
            torch.tensor([[[1, 1]], [[3, 3]], [[1, 1]]])
        )

    @unittest.skipUnless(torch.cuda.is_available(), "No cuda available")
    def test_random_token_mask_cuda(self) -> None:
        self._test_random_token_mask_parameters(device="cuda")
def test_get_weight_decay_parameters() -> None:
    """Weights of linear/conv layers decay; biases and norm parameters do not."""
    linear = nn.Linear(10, 10)
    batch_norm1d = nn.BatchNorm1d(10)
    conv = nn.Conv2d(3, 3, 3)
    batch_norm2d = nn.BatchNorm2d(3)
    sequential = nn.Sequential(linear, batch_norm1d, conv, batch_norm2d)
    params, params_no_weight_decay = utils.get_weight_decay_parameters(
        modules=[sequential]
    )
    expected_decay = [linear.weight, conv.weight]
    expected_no_decay = [
        linear.bias,
        batch_norm1d.weight,
        batch_norm1d.bias,
        conv.bias,
        batch_norm2d.weight,
        batch_norm2d.bias,
    ]
    assert len(params) == 2
    assert len(params_no_weight_decay) == 6
    assert all(got is want for got, want in zip(params, expected_decay))
    assert all(
        got is want for got, want in zip(params_no_weight_decay, expected_no_decay)
    )
def test_get_weight_decay_parameters__nested() -> None:
    """Parameters of nested sub-modules are discovered recursively."""
    linear = nn.Linear(10, 10)
    batch_norm1d = nn.BatchNorm1d(10)
    wrapped = nn.Sequential(
        nn.Sequential(linear, batch_norm1d),
    )
    params, params_no_weight_decay = utils.get_weight_decay_parameters(
        modules=[wrapped]
    )
    assert len(params) == 1
    assert params[0] is linear.weight
    expected_no_decay = [linear.bias, batch_norm1d.weight, batch_norm1d.bias]
    assert len(params_no_weight_decay) == 3
    assert all(
        got is want for got, want in zip(params_no_weight_decay, expected_no_decay)
    )
def test_get_weight_decay_parameters__batch_norm() -> None:
    """With decay_batch_norm=True all batch norm parameters get weight decay."""
    norm1d = nn.BatchNorm1d(10)
    norm2d = nn.BatchNorm2d(10)
    params, params_no_weight_decay = utils.get_weight_decay_parameters(
        modules=[norm1d, norm2d], decay_batch_norm=True
    )
    assert len(params_no_weight_decay) == 0
    expected_decay = [norm1d.weight, norm1d.bias, norm2d.weight, norm2d.bias]
    assert len(params) == 4
    assert all(got is want for got, want in zip(params, expected_decay))
def test_get_weight_decay_parameters__no_batch_norm() -> None:
    """With decay_batch_norm=False batch norm parameters are excluded from decay."""
    bn1d = nn.BatchNorm1d(10)
    bn2d = nn.BatchNorm2d(10)
    params, params_no_weight_decay = utils.get_weight_decay_parameters(
        modules=[bn1d, bn2d], decay_batch_norm=False
    )
    # Note: a leftover debug print of the two parameter lists was removed here.
    assert len(params) == 0
    assert len(params_no_weight_decay) == 4
    assert params_no_weight_decay[0] is bn1d.weight
    assert params_no_weight_decay[1] is bn1d.bias
    assert params_no_weight_decay[2] is bn2d.weight
    assert params_no_weight_decay[3] is bn2d.bias
def test_get_weight_decay_parameters__bias() -> None:
    """With decay_bias=True the bias also receives weight decay."""
    linear = nn.Linear(10, 10)
    param, param_no_weight_decay = utils.get_weight_decay_parameters(
        modules=[linear], decay_bias=True
    )
    expected_decay = [linear.weight, linear.bias]
    assert len(param) == 2
    assert len(param_no_weight_decay) == 0
    assert all(got is want for got, want in zip(param, expected_decay))
def test_get_weight_decay_parameters__no_bias() -> None:
    """With decay_bias=False the bias is excluded from weight decay."""
    linear = nn.Linear(10, 10)
    param, param_no_weight_decay = utils.get_weight_decay_parameters(
        modules=[linear], decay_bias=False
    )
    assert len(param) == 1
    assert len(param_no_weight_decay) == 1
    assert param[0] is linear.weight
    assert param_no_weight_decay[0] is linear.bias
| 13,742 | 35.357143 | 87 | py |
lightly | lightly-master/tests/models/test_ModelsBYOL.py | import unittest
import torch
import torch.nn as nn
import torchvision
import lightly
from lightly.models import BYOL, ResNetGenerator
def get_backbone(resnet, num_ftrs=64):
    """Build a BYOL backbone from ``resnet``.

    The final classification layer is dropped and replaced by a 1x1 convolution
    projecting to ``num_ftrs`` channels followed by global average pooling. A
    norm layer for 3-channel inputs is prepended.
    """
    head_in_channels = list(resnet.children())[-1].in_features
    layers = [lightly.models.batchnorm.get_norm_layer(3, 0)]
    layers.extend(list(resnet.children())[:-1])
    layers.append(nn.Conv2d(head_in_channels, num_ftrs, 1))
    layers.append(nn.AdaptiveAvgPool2d(1))
    return nn.Sequential(*layers)
class TestModelsBYOL(unittest.TestCase):
    """Smoke tests for the BYOL model wrapper with different ResNet backbones."""

    def setUp(self) -> None:
        self.resnet_variants = ["resnet-18", "resnet-50"]
        self.batch_size = 2
        self.input_tensor = torch.rand((self.batch_size, 3, 32, 32))

    def test_create_variations_cpu(self) -> None:
        # Models can be constructed on CPU for every supported variant.
        for model_name in self.resnet_variants:
            resnet = ResNetGenerator(model_name)
            model = BYOL(get_backbone(resnet))
            self.assertIsNotNone(model)

    def test_create_variations_gpu(self) -> None:
        # Models can be constructed on GPU; silently a no-op without CUDA.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            for model_name in self.resnet_variants:
                resnet = ResNetGenerator(model_name)
                model = BYOL(get_backbone(resnet)).to(device)
                self.assertIsNotNone(model)
        else:
            pass

    def test_feature_dim_configurable(self) -> None:
        # num_ftrs and out_dim control backbone and projection output sizes.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name in self.resnet_variants:
            for num_ftrs, out_dim in zip([16, 64], [64, 256]):
                resnet = ResNetGenerator(model_name)
                model = BYOL(
                    get_backbone(resnet, num_ftrs=num_ftrs),
                    num_ftrs=num_ftrs,
                    out_dim=out_dim,
                ).to(device)
                # check that feature vector has correct dimension
                with torch.no_grad():
                    out_features = model.backbone(self.input_tensor.to(device))
                self.assertEqual(out_features.shape[1], num_ftrs)
                # check that projection head output has right dimension
                with torch.no_grad():
                    out_projection = model.projection_head(out_features.squeeze())
                self.assertEqual(out_projection.shape[1], out_dim)
                self.assertIsNotNone(model)

    def test_variations_input_dimension(self) -> None:
        # The model accepts different input resolutions.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name in self.resnet_variants:
            for input_width, input_height in zip([32, 64], [64, 64]):
                resnet = ResNetGenerator(model_name)
                model = BYOL(get_backbone(resnet, num_ftrs=32), num_ftrs=32).to(device)
                input_tensor = torch.rand(
                    (self.batch_size, 3, input_height, input_width)
                )
                with torch.no_grad():
                    out, _ = model(input_tensor.to(device), input_tensor.to(device))
                self.assertIsNotNone(model)
                self.assertIsNotNone(out)

    def test_tuple_input(self) -> None:
        # Forward with two views returns (projection, prediction) per view.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        resnet = ResNetGenerator("resnet-18")
        model = BYOL(get_backbone(resnet, num_ftrs=32), num_ftrs=32, out_dim=128).to(
            device
        )
        x0 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
        x1 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
        (z0, p0), (z1, p1) = model(x0, x1)
        self.assertEqual(z0.shape, (self.batch_size, 128))
        self.assertEqual(z1.shape, (self.batch_size, 128))
        self.assertEqual(p0.shape, (self.batch_size, 128))
        self.assertEqual(p1.shape, (self.batch_size, 128))

    def test_raises(self) -> None:
        # Missing inputs or shape-mismatched views raise ValueError.
        resnet = ResNetGenerator("resnet-18")
        model = BYOL(get_backbone(resnet))
        x0 = torch.rand((self.batch_size, 3, 64, 64))
        with self.assertRaises(ValueError):
            model(x0, None)
        with self.assertRaises(ValueError):
            model(None, x0)
        # test different input shape
        x1 = torch.rand((self.batch_size, 5, 32, 32))
        with self.assertRaises(ValueError):
            model(x0, x1)
# Allow running this test module directly via `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 4,235 | 35.205128 | 87 | py |
lightly | lightly-master/tests/models/test_ModelsMoCo.py | import unittest
import torch
import torch.nn as nn
import torchvision
import lightly
from lightly.models import MoCo, ResNetGenerator
def get_backbone(resnet, num_ftrs=64):
    """Build a MoCo backbone from ``resnet``.

    The final classification layer is dropped and replaced by a 1x1 convolution
    projecting to ``num_ftrs`` channels followed by adaptive average pooling.
    A norm layer for 3-channel inputs is prepended.
    """
    last_conv_channels = list(resnet.children())[-1].in_features
    backbone = nn.Sequential(
        lightly.models.batchnorm.get_norm_layer(3, 0),
        *list(resnet.children())[:-1],
        nn.Conv2d(last_conv_channels, num_ftrs, 1),
        nn.AdaptiveAvgPool2d(1),
    )
    return backbone
class TestModelsMoCo(unittest.TestCase):
    """Smoke tests for the MoCo model wrapper with different ResNet backbones."""

    def setUp(self) -> None:
        self.resnet_variants = ["resnet-18", "resnet-50"]
        self.batch_size = 2
        self.input_tensor = torch.rand((self.batch_size, 3, 32, 32))

    def test_create_variations_cpu(self) -> None:
        # Models can be constructed on CPU for every supported variant.
        for model_name in self.resnet_variants:
            resnet = ResNetGenerator(model_name)
            model = MoCo(get_backbone(resnet))
            self.assertIsNotNone(model)

    def test_create_variations_gpu(self) -> None:
        # Models can be constructed on GPU; silently a no-op without CUDA.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            for model_name in self.resnet_variants:
                resnet = ResNetGenerator(model_name)
                model = MoCo(get_backbone(resnet)).to(device)
                self.assertIsNotNone(model)
        else:
            pass

    def test_feature_dim_configurable(self) -> None:
        # num_ftrs and out_dim control backbone and projection output sizes.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name in self.resnet_variants:
            for num_ftrs, out_dim in zip([16, 64], [64, 256]):
                resnet = ResNetGenerator(model_name)
                model = MoCo(
                    get_backbone(resnet, num_ftrs=num_ftrs),
                    num_ftrs=num_ftrs,
                    out_dim=out_dim,
                ).to(device)
                # check that feature vector has correct dimension
                with torch.no_grad():
                    out_features = model.backbone(self.input_tensor.to(device))
                self.assertEqual(out_features.shape[1], num_ftrs)
                # check that projection head output has right dimension
                with torch.no_grad():
                    out_projection = model.projection_head(out_features.squeeze())
                self.assertEqual(out_projection.shape[1], out_dim)
                self.assertIsNotNone(model)

    def test_variations_input_dimension(self) -> None:
        # The model accepts different input resolutions.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name in self.resnet_variants:
            for input_width, input_height in zip([32, 64], [64, 64]):
                resnet = ResNetGenerator(model_name)
                model = MoCo(get_backbone(resnet, num_ftrs=32)).to(device)
                input_tensor = torch.rand(
                    (self.batch_size, 3, input_height, input_width)
                )
                with torch.no_grad():
                    out = model(input_tensor.to(device))
                self.assertIsNotNone(model)
                self.assertIsNotNone(out)

    def test_tuple_input(self) -> None:
        # Single view, two views, and return_features variants all produce
        # outputs with the expected shapes.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        resnet = ResNetGenerator("resnet-18")
        model = MoCo(get_backbone(resnet, num_ftrs=32), out_dim=128).to(device)
        x0 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
        x1 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
        out = model(x0)
        self.assertEqual(out.shape, (self.batch_size, 128))
        out, features = model(x0, return_features=True)
        self.assertEqual(out.shape, (self.batch_size, 128))
        self.assertEqual(features.shape, (self.batch_size, 32))
        out0, out1 = model(x0, x1)
        self.assertEqual(out0.shape, (self.batch_size, 128))
        self.assertEqual(out1.shape, (self.batch_size, 128))
        (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
        self.assertEqual(out0.shape, (self.batch_size, 128))
        self.assertEqual(out1.shape, (self.batch_size, 128))
        self.assertEqual(f0.shape, (self.batch_size, 32))
        self.assertEqual(f1.shape, (self.batch_size, 32))
# Allow running this test module directly via `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 4,132 | 36.572727 | 82 | py |
lightly | lightly-master/tests/models/test_ModelsNNCLR.py | import unittest
import torch
import torch.nn as nn
import torchvision
from lightly.models import NNCLR
from lightly.models.modules import NNMemoryBankModule
def resnet_generator(name: str):
    """Return a fresh torchvision ResNet for the given variant name.

    Args:
        name: Either "resnet18" or "resnet50".

    Raises:
        NotImplementedError: If ``name`` is not a supported variant.
    """
    if name == "resnet18":
        return torchvision.models.resnet18()
    elif name == "resnet50":
        return torchvision.models.resnet50()
    # Include the offending name in the error message to ease debugging;
    # the exception type is unchanged for backward compatibility.
    raise NotImplementedError(f"Unknown resnet variant: {name}")
def get_backbone(model: nn.Module):
    """Return ``model`` without its final child module (the classification head)."""
    children = list(model.children())
    return nn.Sequential(*children[:-1])
class TestNNCLR(unittest.TestCase):
    """Smoke tests for the NNCLR wrapper around ResNet backbones."""
    def setUp(self):
        # Per-variant dimensions: backbone feature size, projection/prediction
        # hidden sizes, and output embedding size.
        self.resnet_variants = dict(
            resnet18=dict(
                num_ftrs=512,
                proj_hidden_dim=512,
                pred_hidden_dim=128,
                out_dim=512,
            ),
            resnet50=dict(
                num_ftrs=2048,
                proj_hidden_dim=2048,
                pred_hidden_dim=512,
                out_dim=2048,
            ),
        )
        self.batch_size = 2
        self.input_tensor = torch.rand((self.batch_size, 3, 32, 32))
    def test_create_variations_cpu(self):
        """Constructing each variant on CPU must succeed."""
        for model_name, config in self.resnet_variants.items():
            resnet = resnet_generator(model_name)
            model = NNCLR(get_backbone(resnet), **config)
            self.assertIsNotNone(model)
    def test_create_variations_gpu(self):
        """Constructing each variant on GPU must succeed (skipped without CUDA)."""
        if not torch.cuda.is_available():
            return
        for model_name, config in self.resnet_variants.items():
            resnet = resnet_generator(model_name)
            model = NNCLR(get_backbone(resnet), **config).to("cuda")
            self.assertIsNotNone(model)
    def test_feature_dim_configurable(self):
        """Backbone, projection, and prediction outputs must match the config dims."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name, config in self.resnet_variants.items():
            resnet = resnet_generator(model_name)
            model = NNCLR(get_backbone(resnet), **config).to(device)
            # check that feature vector has correct dimension
            with torch.no_grad():
                out_features = model.backbone(self.input_tensor.to(device))
            self.assertEqual(out_features.shape[1], config["num_ftrs"])
            # check that projection head output has right dimension
            with torch.no_grad():
                out_projection = model.projection_mlp(out_features.squeeze())
            self.assertEqual(out_projection.shape[1], config["out_dim"])
            # check that prediction head output has right dimension
            with torch.no_grad():
                out_prediction = model.prediction_mlp(out_projection.squeeze())
            self.assertEqual(out_prediction.shape[1], config["out_dim"])
    def test_tuple_input(self):
        """forward() must support single input, paired input, and return_features."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name, config in self.resnet_variants.items():
            resnet = resnet_generator(model_name)
            model = NNCLR(get_backbone(resnet), **config).to(device)
            x0 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
            x1 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
            # single input -> (projection, prediction) tuple
            out = model(x0)
            self.assertEqual(out[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out[1].shape, (self.batch_size, config["out_dim"]))
            # return_features additionally yields the backbone features
            out, features = model(x0, return_features=True)
            self.assertEqual(out[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(features.shape, (self.batch_size, config["num_ftrs"]))
            # paired input -> one output tuple per view
            out0, out1 = model(x0, x1)
            self.assertEqual(out0[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out0[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[1].shape, (self.batch_size, config["out_dim"]))
            (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
            self.assertEqual(out0[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out0[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(f0.shape, (self.batch_size, config["num_ftrs"]))
            self.assertEqual(f1.shape, (self.batch_size, config["num_ftrs"]))
    def test_memory_bank(self):
        """NNMemoryBankModule must accept projections at several bank sizes."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name, config in self.resnet_variants.items():
            resnet = resnet_generator(model_name)
            model = NNCLR(get_backbone(resnet), **config).to(device)
            for nn_size in [2**3, 2**8]:
                nn_replacer = NNMemoryBankModule(size=nn_size)
                with torch.no_grad():
                    for i in range(10):
                        x0 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
                        x1 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
                        (z0, p0), (z1, p1) = model(x0, x1)
                        # update=False queries the bank; update=True also inserts
                        z0 = nn_replacer(z0.detach(), update=False)
                        z1 = nn_replacer(z1.detach(), update=True)
| 5,345 | 41.094488 | 83 | py |
lightly | lightly-master/tests/models/test_ModelsSimCLR.py | import unittest
import torch
import torch.nn as nn
import torchvision
import lightly
from lightly.models import ResNetGenerator, SimCLR
def get_backbone(resnet, num_ftrs=64):
    """Build a SimCLR backbone from a lightly ResNet.

    Prepends a normalization layer, keeps all children of ``resnet`` except
    the last, then projects to ``num_ftrs`` channels with a 1x1 conv followed
    by global average pooling.
    """
    children = list(resnet.children())
    in_channels = children[-1].in_features
    layers = [lightly.models.batchnorm.get_norm_layer(3, 0)]
    layers.extend(children[:-1])
    layers.append(nn.Conv2d(in_channels, num_ftrs, 1))
    layers.append(nn.AdaptiveAvgPool2d(1))
    return nn.Sequential(*layers)
class TestModelsSimCLR(unittest.TestCase):
    """Smoke tests for the SimCLR wrapper around lightly ResNet backbones."""
    def setUp(self):
        self.resnet_variants = ["resnet-18", "resnet-50"]
        self.batch_size = 2
        self.input_tensor = torch.rand((self.batch_size, 3, 32, 32))
    def test_create_variations_cpu(self):
        """Constructing each variant on CPU must succeed."""
        for model_name in self.resnet_variants:
            resnet = ResNetGenerator(model_name)
            model = SimCLR(get_backbone(resnet))
            self.assertIsNotNone(model)
    def test_create_variations_gpu(self):
        """Constructing each variant on GPU must succeed (no-op without CUDA)."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if device == "cuda":
            for model_name in self.resnet_variants:
                resnet = ResNetGenerator(model_name)
                model = SimCLR(get_backbone(resnet)).to(device)
                self.assertIsNotNone(model)
        else:
            pass
    def test_feature_dim_configurable(self):
        """Backbone and projection outputs must match num_ftrs / out_dim."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name in self.resnet_variants:
            for num_ftrs, out_dim in zip([16, 64], [64, 256]):
                resnet = ResNetGenerator(model_name)
                model = SimCLR(
                    get_backbone(resnet, num_ftrs=num_ftrs),
                    num_ftrs=num_ftrs,
                    out_dim=out_dim,
                ).to(device)
                # check that feature vector has correct dimension
                with torch.no_grad():
                    out_features = model.backbone(self.input_tensor.to(device))
                self.assertEqual(out_features.shape[1], num_ftrs)
                # check that projection head output has right dimension
                with torch.no_grad():
                    out_projection = model.projection_head(out_features.squeeze())
                self.assertEqual(out_projection.shape[1], out_dim)
                self.assertIsNotNone(model)
    def test_variations_input_dimension(self):
        """forward() must accept non-square and varying input resolutions."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name in self.resnet_variants:
            for input_width, input_height in zip([32, 64], [64, 64]):
                resnet = ResNetGenerator(model_name)
                model = SimCLR(get_backbone(resnet, num_ftrs=32)).to(device)
                input_tensor = torch.rand(
                    (self.batch_size, 3, input_height, input_width)
                )
                with torch.no_grad():
                    out = model(input_tensor.to(device))
                self.assertIsNotNone(model)
                self.assertIsNotNone(out)
    def test_tuple_input(self):
        """forward() must support single input, paired input, and return_features."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        resnet = ResNetGenerator("resnet-18")
        model = SimCLR(get_backbone(resnet, num_ftrs=32), out_dim=128).to(device)
        x0 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
        x1 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
        out = model(x0)
        self.assertEqual(out.shape, (self.batch_size, 128))
        # return_features additionally yields the backbone features
        out, features = model(x0, return_features=True)
        self.assertEqual(out.shape, (self.batch_size, 128))
        self.assertEqual(features.shape, (self.batch_size, 32))
        # paired input -> one projection per view
        out0, out1 = model(x0, x1)
        self.assertEqual(out0.shape, (self.batch_size, 128))
        self.assertEqual(out1.shape, (self.batch_size, 128))
        (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
        self.assertEqual(out0.shape, (self.batch_size, 128))
        self.assertEqual(out1.shape, (self.batch_size, 128))
        self.assertEqual(f0.shape, (self.batch_size, 32))
        self.assertEqual(f1.shape, (self.batch_size, 32))
# Allow running this test module directly with `python <file>.py`.
if __name__ == "__main__":
    unittest.main()
| 4,146 | 36.7 | 82 | py |
lightly | lightly-master/tests/models/test_ModelsSimSiam.py | import unittest
import torch
import torch.nn as nn
import torchvision
from lightly.models import SimSiam
def resnet_generator(name: str):
    """Instantiate a torchvision ResNet by name ("resnet18" or "resnet50").

    Raises:
        NotImplementedError: If ``name`` is not a supported variant.
    """
    factories = {
        "resnet18": torchvision.models.resnet18,
        "resnet50": torchvision.models.resnet50,
    }
    if name not in factories:
        # Improvement: report the unsupported name instead of a bare error.
        raise NotImplementedError(f"Unknown ResNet variant: {name!r}")
    return factories[name]()
def get_backbone(model: nn.Module):
    """Strip the classification head (last child) from ``model``.

    Returns:
        A ``torch.nn.Sequential`` over every child module except the last.
    """
    *feature_layers, _head = model.children()
    return torch.nn.Sequential(*feature_layers)
class TestSimSiam(unittest.TestCase):
    """Smoke tests for the SimSiam wrapper around ResNet backbones."""
    def setUp(self):
        # Per-variant dimensions: backbone feature size, projection/prediction
        # hidden sizes, and output embedding size.
        self.resnet_variants = dict(
            resnet18=dict(
                num_ftrs=512,
                proj_hidden_dim=512,
                pred_hidden_dim=128,
                out_dim=512,
            ),
            resnet50=dict(
                num_ftrs=2048,
                proj_hidden_dim=2048,
                pred_hidden_dim=512,
                out_dim=2048,
            ),
        )
        self.batch_size = 2
        self.input_tensor = torch.rand((self.batch_size, 3, 32, 32))
    def test_create_variations_cpu(self):
        """Constructing each variant on CPU must succeed."""
        for model_name, config in self.resnet_variants.items():
            resnet = resnet_generator(model_name)
            model = SimSiam(get_backbone(resnet), **config)
            self.assertIsNotNone(model)
    def test_create_variations_gpu(self):
        """Constructing each variant on GPU must succeed (skipped without CUDA)."""
        if not torch.cuda.is_available():
            return
        for model_name, config in self.resnet_variants.items():
            resnet = resnet_generator(model_name)
            model = SimSiam(get_backbone(resnet), **config).to("cuda")
            self.assertIsNotNone(model)
    def test_feature_dim_configurable(self):
        """Backbone, projection, and prediction outputs must match the config dims."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name, config in self.resnet_variants.items():
            resnet = resnet_generator(model_name)
            model = SimSiam(get_backbone(resnet), **config).to(device)
            # check that feature vector has correct dimension
            with torch.no_grad():
                out_features = model.backbone(self.input_tensor.to(device))
            self.assertEqual(out_features.shape[1], config["num_ftrs"])
            # check that projection head output has right dimension
            with torch.no_grad():
                out_projection = model.projection_mlp(out_features.squeeze())
            self.assertEqual(out_projection.shape[1], config["out_dim"])
            # check that prediction head output has right dimension
            with torch.no_grad():
                out_prediction = model.prediction_mlp(out_projection.squeeze())
            self.assertEqual(out_prediction.shape[1], config["out_dim"])
    def test_tuple_input(self):
        """forward() must support single input, paired input, and return_features."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        for model_name, config in self.resnet_variants.items():
            resnet = resnet_generator(model_name)
            model = SimSiam(get_backbone(resnet), **config).to(device)
            x0 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
            x1 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
            # single input -> (projection, prediction) tuple
            out = model(x0)
            self.assertEqual(out[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out[1].shape, (self.batch_size, config["out_dim"]))
            # return_features additionally yields the backbone features
            out, features = model(x0, return_features=True)
            self.assertEqual(out[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(features.shape, (self.batch_size, config["num_ftrs"]))
            # paired input -> one output tuple per view
            out0, out1 = model(x0, x1)
            self.assertEqual(out0[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out0[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[1].shape, (self.batch_size, config["out_dim"]))
            (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
            self.assertEqual(out0[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out0[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[0].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(out1[1].shape, (self.batch_size, config["out_dim"]))
            self.assertEqual(f0.shape, (self.batch_size, config["num_ftrs"]))
            self.assertEqual(f1.shape, (self.batch_size, config["num_ftrs"]))
| 4,483 | 40.137615 | 83 | py |
lightly | lightly-master/tests/models/test_ProjectionHeads.py | import unittest
import torch
import lightly
from lightly.models.modules.heads import (
BarlowTwinsProjectionHead,
BYOLPredictionHead,
BYOLProjectionHead,
DINOProjectionHead,
MoCoProjectionHead,
MSNProjectionHead,
NNCLRPredictionHead,
NNCLRProjectionHead,
SimCLRProjectionHead,
SimSiamPredictionHead,
SimSiamProjectionHead,
SwaVProjectionHead,
SwaVPrototypes,
TiCoProjectionHead,
VicRegLLocalProjectionHead,
)
class TestProjectionHeads(unittest.TestCase):
    """Shape and gradient-freezing tests for all lightly projection heads."""
    def setUp(self):
        # (input_dim, hidden_dim, output_dim) combinations to exercise.
        self.n_features = [
            (8, 16, 32),
            (8, 32, 16),
            (16, 8, 32),
            (16, 32, 8),
            (32, 8, 16),
            (32, 16, 8),
        ]
        # (input_dim, hidden_dim, list-of-prototype-counts) for multi-prototype SwaV.
        self.swavProtoypes = [(8, 16, [32, 64, 128])]
        self.heads = [
            BarlowTwinsProjectionHead,
            BYOLProjectionHead,
            BYOLPredictionHead,
            DINOProjectionHead,
            MoCoProjectionHead,
            MSNProjectionHead,
            NNCLRProjectionHead,
            NNCLRPredictionHead,
            SimCLRProjectionHead,
            SimSiamProjectionHead,
            SimSiamPredictionHead,
            SwaVProjectionHead,
            TiCoProjectionHead,
            VicRegLLocalProjectionHead,
        ]
    def test_single_projection_head(self, device: str = "cpu", seed=0):
        """Every head must map (batch, in) to (batch, out) for all dim combos."""
        for head_cls in self.heads:
            for in_features, hidden_features, out_features in self.n_features:
                torch.manual_seed(seed)
                # DINO and SimCLR heads take extra constructor arguments.
                if head_cls == DINOProjectionHead:
                    bottleneck_features = hidden_features
                    head = head_cls(
                        in_features, hidden_features, bottleneck_features, out_features
                    )
                elif head_cls == SimCLRProjectionHead:
                    head = head_cls(
                        in_features, hidden_features, out_features, batch_norm=False
                    )
                else:
                    head = head_cls(in_features, hidden_features, out_features)
                head = head.eval()
                head = head.to(device)
                for batch_size in [1, 2]:
                    msg = (
                        f"head: {head_cls}"
                        + f"d_in, d_h, d_out = "
                        + f"{in_features}x{hidden_features}x{out_features}"
                    )
                    with self.subTest(msg=msg):
                        x = torch.torch.rand((batch_size, in_features)).to(device)
                        with torch.no_grad():
                            y = head(x)
                        self.assertEqual(y.shape[0], batch_size)
                        self.assertEqual(y.shape[1], out_features)
    @unittest.skipUnless(torch.cuda.is_available(), "skip")
    def test_single_projection_head_cuda(self, seed=0):
        self.test_single_projection_head(device="cuda", seed=seed)
    def test_swav_prototypes(self, device: str = "cpu", seed=0):
        """SwaVPrototypes must map (batch, in) to (batch, n_prototypes)."""
        for in_features, _, n_prototypes in self.n_features:
            torch.manual_seed(seed)
            prototypes = SwaVPrototypes(in_features, n_prototypes)
            prototypes = prototypes.eval()
            prototypes = prototypes.to(device)
            for batch_size in [1, 2]:
                msg = (
                    "prototypes d_in, n_prototypes = "
                    + f"{in_features} x {n_prototypes}"
                )
                with self.subTest(msg=msg):
                    x = torch.torch.rand((batch_size, in_features)).to(device)
                    with torch.no_grad():
                        y = prototypes(x)
                    self.assertEqual(y.shape[0], batch_size)
                    self.assertEqual(y.shape[1], n_prototypes)
    def test_swav_frozen_prototypes(self, device: str = "cpu", seed=0):
        """With n_steps_frozen_prototypes=2, the loss must stay constant for the
        first steps and change once the prototypes start training."""
        criterion = torch.nn.L1Loss()
        linear_layer = torch.nn.Linear(8, 8, bias=False)
        prototypes = SwaVPrototypes(
            input_dim=8, n_prototypes=8, n_steps_frozen_prototypes=2
        )
        optimizer = torch.optim.SGD(prototypes.parameters(), lr=0.01)
        torch.manual_seed(seed)
        in_features = torch.rand(4, 8, device="cpu")
        target_features = torch.ones(4, 8, device="cpu")
        for step in range(4):
            out_features = linear_layer(in_features)
            out_features = prototypes.forward(out_features, step)
            loss = criterion(out_features, target_features)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            if step == 0:
                loss0 = loss
            # steps 0..2: prototypes frozen, loss unchanged; afterwards it moves
            if step <= 2:
                self.assertEqual(loss, loss0)
            if step > 2:
                self.assertNotEqual(loss, loss0)
    def test_swav_multi_prototypes(self, device: str = "cpu", seed=0):
        """A list of prototype counts must yield one output tensor per count."""
        for in_features, _, n_prototypes in self.swavProtoypes:
            torch.manual_seed(seed)
            prototypes = SwaVPrototypes(in_features, n_prototypes)
            prototypes = prototypes.eval()
            prototypes = prototypes.to(device)
            for batch_size in [1, 2]:
                msg = (
                    "prototypes d_in, n_prototypes = "
                    + f"{in_features} x {n_prototypes}"
                )
                with self.subTest(msg=msg):
                    x = torch.torch.rand((batch_size, in_features)).to(device)
                    with torch.no_grad():
                        y = prototypes(x)
                    for layerNum, prototypeSize in enumerate(n_prototypes):
                        self.assertEqual(y[layerNum].shape[0], batch_size)
                        self.assertEqual(y[layerNum].shape[1], prototypeSize)
    @unittest.skipUnless(torch.cuda.is_available(), "skip")
    def test_swav_prototypes_cuda(self, seed=0):
        self.test_swav_prototypes(device="cuda", seed=seed)
    def test_dino_projection_head(self, device="cpu", seed=0):
        """DINO head must produce (batch, output_dim) for all bottleneck/BN combos."""
        input_dim, hidden_dim, output_dim = self.n_features[0]
        for bottleneck_dim in [8, 16, 32]:
            for batch_norm in [False, True]:
                torch.manual_seed(seed)
                head = DINOProjectionHead(
                    input_dim=input_dim,
                    hidden_dim=hidden_dim,
                    output_dim=output_dim,
                    bottleneck_dim=bottleneck_dim,
                    batch_norm=batch_norm,
                )
                head = head.eval()
                head = head.to(device)
                for batch_size in [1, 2]:
                    msg = (
                        f"bottleneck_dim={bottleneck_dim}, " f"batch_norm={batch_norm}"
                    )
                    with self.subTest(msg=msg):
                        x = torch.torch.rand((batch_size, input_dim)).to(device)
                        with torch.no_grad():
                            y = head(x)
                        self.assertEqual(y.shape[0], batch_size)
                        self.assertEqual(y.shape[1], output_dim)
    @unittest.skipUnless(torch.cuda.is_available(), "skip")
    def test_dino_projection_head_cuda(self, seed=0):
        self.test_dino_projection_head(device="cuda", seed=seed)
    def test_dino_projection_head_freeze_last_layer(self, seed=0):
        """Test if freeze last layer cancels backprop."""
        torch.manual_seed(seed)
        for norm_last_layer in [False, True]:
            for freeze_last_layer in range(-1, 3):
                head = DINOProjectionHead(
                    input_dim=4,
                    hidden_dim=4,
                    output_dim=4,
                    bottleneck_dim=4,
                    freeze_last_layer=freeze_last_layer,
                    norm_last_layer=norm_last_layer,
                )
                optimizer = torch.optim.SGD(head.parameters(), lr=1)
                criterion = lightly.loss.DINOLoss(output_dim=4)
                # Store initial weights of last layer
                initial_data = [
                    param.data.detach().clone()
                    for param in head.last_layer.parameters()
                ]
                for epoch in range(5):
                    with self.subTest(
                        f"norm_last_layer={norm_last_layer}, "
                        f"freeze_last_layer={freeze_last_layer}, "
                        f"epoch={epoch}"
                    ):
                        views = [torch.rand((3, 4)) for _ in range(2)]
                        teacher_out = [head(view) for view in views]
                        student_out = [head(view) for view in views]
                        loss = criterion(teacher_out, student_out, epoch=epoch)
                        optimizer.zero_grad()
                        loss.backward()
                        head.cancel_last_layer_gradients(current_epoch=epoch)
                        optimizer.step()
                        params = head.last_layer.parameters()
                        # Verify that weights have (not) changed depending on epoch.
                        for param, init_data in zip(params, initial_data):
                            if param.requires_grad:
                                are_same = torch.allclose(param.data, init_data)
                                if epoch >= freeze_last_layer:
                                    self.assertFalse(are_same)
                                else:
                                    self.assertTrue(are_same)
    def test_simclr_projection_head_multiple_layers(self, device: str = "cpu", seed=0):
        """SimCLR head must keep the output shape for 2-4 layers, with/without BN."""
        for in_features, hidden_features, out_features in self.n_features:
            for num_layers in range(2, 5):
                for batch_norm in [True, False]:
                    torch.manual_seed(seed)
                    head = SimCLRProjectionHead(
                        in_features,
                        hidden_features,
                        out_features,
                        num_layers,
                        batch_norm,
                    )
                    head = head.eval()
                    head = head.to(device)
                    for batch_size in [1, 2]:
                        msg = (
                            f"head: SimCLRProjectionHead"
                            + f"d_in, d_h, d_out = "
                            + f"{in_features}x{hidden_features}x{out_features}"
                        )
                        with self.subTest(msg=msg):
                            x = torch.torch.rand((batch_size, in_features)).to(device)
                            with torch.no_grad():
                                y = head(x)
                            self.assertEqual(y.shape[0], batch_size)
                            self.assertEqual(y.shape[1], out_features)
    def test_moco_projection_head_multiple_layers(self, device: str = "cpu", seed=0):
        """MoCo head must keep the output shape for 2-4 layers, with/without BN."""
        for in_features, hidden_features, out_features in self.n_features:
            for num_layers in range(2, 5):
                for batch_norm in [True, False]:
                    torch.manual_seed(seed)
                    head = MoCoProjectionHead(
                        in_features,
                        hidden_features,
                        out_features,
                        num_layers,
                        batch_norm,
                    )
                    head = head.eval()
                    head = head.to(device)
                    for batch_size in [1, 2]:
                        msg = (
                            f"head: MoCoProjectionHead"
                            + f"d_in, d_h, d_out = "
                            + f"{in_features}x{hidden_features}x{out_features}"
                        )
                        with self.subTest(msg=msg):
                            x = torch.torch.rand((batch_size, in_features)).to(device)
                            with torch.no_grad():
                                y = head(x)
                            self.assertEqual(y.shape[0], batch_size)
                            self.assertEqual(y.shape[1], out_features)
| 12,093 | 42.503597 | 87 | py |
lightly | lightly-master/tests/models/modules/test_masked_autoencoder.py | import unittest
import torch
import torchvision
from lightly import _torchvision_vit_available
from lightly.models import utils
if _torchvision_vit_available:
from lightly.models.modules import MAEBackbone, MAEDecoder, MAEEncoder
@unittest.skipUnless(_torchvision_vit_available, "Torchvision ViT not available")
class TestMAEEncoder(unittest.TestCase):
    """Tests MAEEncoder built from a torchvision ViT-B/32 encoder."""
    def _vit(self):
        # Helper: fresh ViT-B/32 without downloading pretrained weights.
        return torchvision.models.vision_transformer.vit_b_32(progress=False)
    def test_from_vit(self):
        """Conversion from a torchvision ViT encoder must not raise."""
        MAEEncoder.from_vit_encoder(self._vit().encoder)
    def _test_forward(self, device, batch_size=8, seed=0):
        """Forward with and without a token mask must yield the right shape."""
        torch.manual_seed(seed)
        vit = self._vit()
        encoder = MAEEncoder.from_vit_encoder(vit.encoder).to(device)
        tokens = torch.rand(batch_size, vit.seq_length, vit.hidden_dim).to(device)
        _idx_keep, _ = utils.random_token_mask(
            size=(batch_size, vit.seq_length),
            device=device,
        )
        for idx_keep in [None, _idx_keep]:
            with self.subTest(idx_keep=idx_keep):
                out = encoder(tokens, idx_keep)
                # output shape must be correct
                expected_shape = list(tokens.shape)
                if idx_keep is not None:
                    # masking keeps only idx_keep.shape[1] tokens per sample
                    expected_shape[1] = idx_keep.shape[1]
                self.assertListEqual(list(out.shape), expected_shape)
                # output must have reasonable numbers
                self.assertTrue(torch.all(torch.not_equal(out, torch.inf)))
    def test_forward(self):
        self._test_forward(torch.device("cpu"))
    @unittest.skipUnless(torch.cuda.is_available(), "Cuda not available.")
    def test_forward_cuda(self):
        self._test_forward(torch.device("cuda"))
@unittest.skipUnless(_torchvision_vit_available, "Torchvision ViT not available")
class TestMAEBackbone(unittest.TestCase):
    """Tests MAEBackbone built from a full torchvision ViT-B/32 model."""
    def _vit(self):
        # Helper: fresh ViT-B/32 without downloading pretrained weights.
        return torchvision.models.vision_transformer.vit_b_32(progress=False)
    def test_from_vit(self):
        """Conversion from a torchvision ViT must not raise."""
        MAEBackbone.from_vit(self._vit())
    def _test_forward(self, device, batch_size=8, seed=0):
        """Forward on images must return (batch, hidden_dim) class tokens."""
        torch.manual_seed(seed)
        vit = self._vit()
        backbone = MAEBackbone.from_vit(vit).to(device)
        images = torch.rand(batch_size, 3, vit.image_size, vit.image_size).to(device)
        _idx_keep, _ = utils.random_token_mask(
            size=(batch_size, vit.seq_length),
            device=device,
        )
        for idx_keep in [None, _idx_keep]:
            with self.subTest(idx_keep=idx_keep):
                class_tokens = backbone(images, idx_keep)
                # output shape must be correct
                expected_shape = [batch_size, vit.hidden_dim]
                self.assertListEqual(list(class_tokens.shape), expected_shape)
                # output must have reasonable numbers
                self.assertTrue(torch.all(torch.not_equal(class_tokens, torch.inf)))
    def test_forward(self):
        self._test_forward(torch.device("cpu"))
    @unittest.skipUnless(torch.cuda.is_available(), "Cuda not available.")
    def test_forward_cuda(self):
        self._test_forward(torch.device("cuda"))
@unittest.skipUnless(_torchvision_vit_available, "Torchvision ViT not available")
class TestMAEDecoder(unittest.TestCase):
    """Tests the MAEDecoder construction and forward shape."""
    def test_init(self):
        """Construction with typical hyperparameters must not raise."""
        return MAEDecoder(
            seq_length=50,
            num_layers=2,
            num_heads=4,
            embed_input_dim=128,
            hidden_dim=256,
            mlp_dim=256 * 4,
            out_dim=3 * 32**2,
        )
    def _test_forward(self, device, batch_size=8, seed=0):
        """Forward must map (batch, seq, embed_in) to (batch, seq, out_dim)."""
        torch.manual_seed(seed)
        seq_length = 50
        embed_input_dim = 128
        # out_dim matches a flattened 3x32x32 patch reconstruction target
        out_dim = 3 * 32**2
        decoder = MAEDecoder(
            seq_length=seq_length,
            num_layers=2,
            num_heads=4,
            embed_input_dim=embed_input_dim,
            hidden_dim=256,
            mlp_dim=256 * 4,
            out_dim=out_dim,
        ).to(device)
        tokens = torch.rand(batch_size, seq_length, embed_input_dim).to(device)
        predictions = decoder(tokens)
        # output shape must be correct
        expected_shape = [batch_size, seq_length, out_dim]
        self.assertListEqual(list(predictions.shape), expected_shape)
        # output must have reasonable numbers
        self.assertTrue(torch.all(torch.not_equal(predictions, torch.inf)))
    def test_forward(self):
        self._test_forward(torch.device("cpu"))
    @unittest.skipUnless(torch.cuda.is_available(), "Cuda not available.")
    def test_forward_cuda(self):
        self._test_forward(torch.device("cuda"))
| 4,602 | 34.407692 | 85 | py |
lightly | lightly-master/tests/transforms/test_GaussianBlur.py | import unittest
from PIL import Image
from lightly.transforms import GaussianBlur
class TestGaussianBlur(unittest.TestCase):
    """Unit tests for the GaussianBlur transform."""

    def test_on_pil_image(self):
        """GaussianBlur must handle PIL images of any size without raising."""
        for w in range(1, 100):
            for h in range(1, 100):
                gaussian_blur = GaussianBlur()
                sample = Image.new("RGB", (w, h))
                gaussian_blur(sample)

    def test_raise_kernel_size_deprecation(self):
        """Passing the deprecated `kernel_size` must emit a DeprecationWarning.

        Bug fix: the original called ``self.assertWarns(DeprecationWarning)``
        bare, which only creates an unused context manager and asserts
        nothing; the warning must be triggered inside the context.
        """
        with self.assertWarns(DeprecationWarning):
            GaussianBlur(kernel_size=2)

    def test_raise_scale_deprecation(self):
        """Passing the deprecated `scale` must emit a DeprecationWarning."""
        with self.assertWarns(DeprecationWarning):
            GaussianBlur(scale=0.1)
| 651 | 27.347826 | 51 | py |
lightly | lightly-master/tests/transforms/test_Jigsaw.py | import unittest
from PIL import Image
from lightly.transforms import Jigsaw
class TestJigsaw(unittest.TestCase):
    """Unit tests for the Jigsaw transform."""

    def test_on_pil_image(self):
        """Applying Jigsaw to a PIL image must not raise."""
        transform = Jigsaw()
        image = Image.new("RGB", (255, 255))
        transform(image)
| 241 | 17.615385 | 45 | py |
lightly | lightly-master/tests/transforms/test_Solarize.py | import unittest
from PIL import Image
from lightly.transforms.solarize import RandomSolarization
class TestRandomSolarization(unittest.TestCase):
    """Unit tests for the RandomSolarization transform."""

    def test_on_pil_image(self):
        """RandomSolarization must handle PIL images of several sizes."""
        sizes = [32, 64, 128]
        for width in sizes:
            for height in sizes:
                transform = RandomSolarization(0.5)
                image = Image.new("RGB", (width, height))
                transform(image)
| 393 | 25.266667 | 58 | py |
lightly | lightly-master/tests/transforms/test_dino_transform.py | from PIL import Image
from lightly.transforms.dino_transform import DINOTransform, DINOViewTransform
def test_view_on_pil_image():
    """A single DINO view transform yields one (3, 32, 32) tensor."""
    transform = DINOViewTransform(crop_size=32)
    image = Image.new("RGB", (100, 100))
    view = transform(image)
    assert view.shape == (3, 32, 32)
def test_multi_view_on_pil_image():
    """DINOTransform yields 2 global (32x32) views followed by 6 local (8x8) views."""
    transform = DINOTransform(global_crop_size=32, local_crop_size=8)
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 8
    # global views
    for view in views[:2]:
        assert view.shape == (3, 32, 32)
    # local views
    for view in views[2:]:
        assert view.shape == (3, 8, 8)
| 710 | 31.318182 | 80 | py |
lightly | lightly-master/tests/transforms/test_fastsiam_transform.py | from PIL import Image
from lightly.transforms.fast_siam_transform import FastSiamTransform
def test_multi_view_on_pil_image():
    """FastSiamTransform with num_views=3 yields three (3, 32, 32) views."""
    transform = FastSiamTransform(num_views=3, input_size=32)
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 3
    for view in views:
        assert view.shape == (3, 32, 32)
| 441 | 30.571429 | 72 | py |
lightly | lightly-master/tests/transforms/test_location_to_NxN_grid.py | import torch
import lightly.transforms.random_crop_and_flip_with_grid as test_module
def test_location_to_NxN_grid():
    """location_to_NxN_grid maps a crop/flip Location to the expected 3x3 grid."""
    # A horizontally flipped 100x200 crop at (10, 20) inside a 244x244 image.
    location = test_module.Location(
        left=10,
        top=20,
        width=100,
        height=200,
        image_height=244,
        image_width=244,
        horizontal_flip=True,
        vertical_flip=False,
    )
    transform = test_module.RandomResizedCropAndFlip(grid_size=3)
    grid = transform.location_to_NxN_grid(location)
    # Expected 3x3 grid of (x, y) cell-center coordinates.
    expected = torch.tensor(
        [
            [[126.6667, 53.3333], [76.6667, 53.3333], [26.6667, 53.3333]],
            [[126.6667, 153.3333], [76.6667, 153.3333], [26.6667, 153.3333]],
            [[126.6667, 253.3333], [76.6667, 253.3333], [26.6667, 253.3333]],
        ]
    )
    assert torch.allclose(grid, expected)
| 1,105 | 30.6 | 77 | py |
lightly | lightly-master/tests/transforms/test_mae_transform.py | from PIL import Image
from lightly.transforms.mae_transform import MAETransform
def test_multi_view_on_pil_image():
    """MAETransform yields exactly one (3, 32, 32) view."""
    transform = MAETransform(input_size=32)
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 1
    assert views[0].shape == (3, 32, 32)
| 328 | 26.416667 | 57 | py |
lightly | lightly-master/tests/transforms/test_moco_transform.py | from PIL import Image
from lightly.transforms.moco_transform import MoCoV1Transform, MoCoV2Transform
def test_moco_v1_multi_view_on_pil_image():
    """MoCoV1Transform yields two (3, 32, 32) views."""
    transform = MoCoV1Transform(input_size=32)
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 2
    for view in views:
        assert view.shape == (3, 32, 32)
def test_moco_v2_multi_view_on_pil_image():
    """MoCoV2Transform yields two (3, 32, 32) views."""
    transform = MoCoV2Transform(input_size=32)
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 2
    for view in views:
        assert view.shape == (3, 32, 32)
| 702 | 30.954545 | 78 | py |
lightly | lightly-master/tests/transforms/test_msn_transform.py | from PIL import Image
from lightly.transforms.msn_transform import MSNTransform, MSNViewTransform
def test_view_on_pil_image():
    """MSNViewTransform crops a PIL image to a (3, 32, 32) tensor."""
    transform = MSNViewTransform(crop_size=32)
    image = Image.new("RGB", (100, 100))
    view = transform(image)
    assert view.shape == (3, 32, 32)
def test_multi_view_on_pil_image():
    """MSNTransform yields 2 random (32x32) views followed by 10 focal (8x8) views."""
    transform = MSNTransform(random_size=32, focal_size=8)
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 12
    # random (global) views
    for view in views[:2]:
        assert view.shape == (3, 32, 32)
    # focal (local) views
    for view in views[2:]:
        assert view.shape == (3, 8, 8)
| 696 | 30.681818 | 75 | py |
lightly | lightly-master/tests/transforms/test_multi_crop_transform.py | from lightly.transforms.multi_crop_transform import MultiCropTranform
| 70 | 34.5 | 69 | py |
lightly | lightly-master/tests/transforms/test_multi_view_transform.py | import unittest
import torchvision.transforms as T
from PIL import Image
from lightly.transforms.multi_view_transform import MultiViewTransform
def test_multi_view_on_pil_image():
    """MultiViewTransform produces one view per wrapped sub-transform."""
    sub_transforms = [
        T.RandomHorizontalFlip(p=0.1),
        T.RandomVerticalFlip(p=0.5),
        T.RandomGrayscale(p=0.3),
    ]
    transform = MultiViewTransform(sub_transforms)
    image = Image.new("RGB", (10, 10))
    views = transform(image)
    assert len(views) == 3
| 489 | 23.5 | 70 | py |
lightly | lightly-master/tests/transforms/test_pirl_transform.py | from PIL import Image
from lightly.transforms.pirl_transform import PIRLTransform
def test_multi_view_on_pil_image():
    """PIRLTransform yields a full (3, 32, 32) view plus nine (3, 10, 10) tiles."""
    transform = PIRLTransform(input_size=32)
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 2
    assert views[0].shape == (3, 32, 32)
    # second output stacks nine jigsaw tiles
    assert views[1].shape == (9, 3, 10, 10)
| 376 | 28 | 59 | py |
lightly | lightly-master/tests/transforms/test_rotation.py | from PIL import Image
from lightly.transforms.rotation import (
RandomRotate,
RandomRotateDegrees,
random_rotation_transform,
)
def test_RandomRotate_on_pil_image():
    """RandomRotate must accept a PIL image without raising."""
    transform = RandomRotate()
    image = Image.new("RGB", (100, 100))
    transform(image)
def test_RandomRotateDegrees_on_pil_image():
    """RandomRotateDegrees must accept scalar and (min, max) degree specs."""
    degree_options = [0, 1, 45, (0, 0), (-15, 30)]
    for degrees in degree_options:
        transform = RandomRotateDegrees(prob=0.5, degrees=degrees)
        image = Image.new("RGB", (100, 100))
        transform(image)
def test_random_rotation_transform():
    """random_rotation_transform dispatches on the value of ``rr_degrees``."""
    # rr_degrees=None -> RandomRotate
    assert isinstance(
        random_rotation_transform(rr_prob=1.0, rr_degrees=None), RandomRotate
    )
    # scalar degrees -> RandomRotateDegrees
    assert isinstance(
        random_rotation_transform(rr_prob=1.0, rr_degrees=45), RandomRotateDegrees
    )
    # (min, max) tuple -> RandomRotateDegrees
    assert isinstance(
        random_rotation_transform(rr_prob=1.0, rr_degrees=(30, 45)),
        RandomRotateDegrees,
    )
| 941 | 30.4 | 75 | py |
lightly | lightly-master/tests/transforms/test_simclr_transform.py | from PIL import Image
from lightly.transforms.simclr_transform import SimCLRTransform, SimCLRViewTransform
def test_view_on_pil_image():
    """SimCLRViewTransform resizes a PIL image to a (3, 32, 32) tensor."""
    transform = SimCLRViewTransform(input_size=32)
    image = Image.new("RGB", (100, 100))
    view = transform(image)
    assert view.shape == (3, 32, 32)
def test_multi_view_on_pil_image():
    """SimCLRTransform yields two (3, 32, 32) views."""
    transform = SimCLRTransform(input_size=32)
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 2
    for view in views:
        assert view.shape == (3, 32, 32)
| 619 | 30 | 84 | py |
lightly | lightly-master/tests/transforms/test_simsiam_transform.py | from PIL import Image
from lightly.transforms.simsiam_transform import SimSiamTransform, SimSiamViewTransform
def test_view_on_pil_image():
    """SimSiamViewTransform resizes a PIL image to a (3, 32, 32) tensor."""
    transform = SimSiamViewTransform(input_size=32)
    image = Image.new("RGB", (100, 100))
    view = transform(image)
    assert view.shape == (3, 32, 32)
def test_multi_view_on_pil_image():
    """SimSiamTransform yields two (3, 32, 32) views."""
    transform = SimSiamTransform(input_size=32)
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 2
    for view in views:
        assert view.shape == (3, 32, 32)
| 624 | 30.25 | 87 | py |
lightly | lightly-master/tests/transforms/test_smog_transform.py | from PIL import Image
from lightly.transforms.smog_transform import SMoGTransform, SmoGViewTransform
def test_view_on_pil_image():
    """SmoGViewTransform crops a PIL image to a (3, 32, 32) tensor."""
    transform = SmoGViewTransform(crop_size=32)
    image = Image.new("RGB", (100, 100))
    view = transform(image)
    assert view.shape == (3, 32, 32)
def test_multi_view_on_pil_image():
    """SMoGTransform yields 4 large (32x32) crops followed by 4 small (8x8) crops."""
    transform = SMoGTransform(crop_sizes=(32, 8))
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 8
    for view in views[:4]:
        assert view.shape == (3, 32, 32)
    for view in views[4:]:
        assert view.shape == (3, 8, 8)
| 653 | 31.7 | 78 | py |
lightly | lightly-master/tests/transforms/test_swav_transform.py | from PIL import Image
from lightly.transforms.swav_transform import SwaVTransform, SwaVViewTransform
def test_view_on_pil_image():
    """SwaVViewTransform yields a (3, 100, 100) tensor for a 100x100 input."""
    transform = SwaVViewTransform()
    image = Image.new("RGB", (100, 100))
    view = transform(image)
    assert view.shape == (3, 100, 100)
def test_multi_view_on_pil_image():
    """SwaVTransform yields 2 large (32x32) crops followed by 6 small (8x8) crops."""
    transform = SwaVTransform(crop_sizes=(32, 8))
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 8
    for view in views[:2]:
        assert view.shape == (3, 32, 32)
    for view in views[2:]:
        assert view.shape == (3, 8, 8)
| 643 | 31.2 | 78 | py |
lightly | lightly-master/tests/transforms/test_vicreg_transform.py | from PIL import Image
from lightly.transforms.vicreg_transform import VICRegTransform, VICRegViewTransform
def test_view_on_pil_image():
    """VICRegViewTransform resizes a PIL image to a (3, 32, 32) tensor."""
    transform = VICRegViewTransform(input_size=32)
    image = Image.new("RGB", (100, 100))
    view = transform(image)
    assert view.shape == (3, 32, 32)
def test_multi_view_on_pil_image():
    """VICRegTransform yields two (3, 32, 32) views."""
    transform = VICRegTransform(input_size=32)
    image = Image.new("RGB", (100, 100))
    views = transform(image)
    assert len(views) == 2
    for view in views:
        assert view.shape == (3, 32, 32)
| 619 | 30 | 84 | py |
lightly | lightly-master/tests/transforms/test_vicregl_transform.py | from PIL import Image
from lightly.transforms.vicregl_transform import VICRegLTransform, VICRegLViewTransform
def test_view_on_pil_image():
    """A single VICRegL view transform returns a 3x100x100 tensor for a 100x100 image."""
    transform = VICRegLViewTransform()
    image = Image.new("RGB", (100, 100))
    result = transform(image)
    assert result.shape == (3, 100, 100)
def test_multi_view_on_pil_image():
    """VICRegLTransform returns global and local views followed by their grids."""
    transform = VICRegLTransform(
        global_crop_size=32,
        local_crop_size=8,
        n_local_views=6,
        global_grid_size=4,
        local_grid_size=2,
    )
    image = Image.new("RGB", (100, 100))
    outputs = transform(image)
    # 2 global views + 6 local views + 2 global grids + 6 local grids = 16 items.
    assert len(outputs) == 16
    global_views, local_views = outputs[:2], outputs[2:8]
    global_grids, local_grids = outputs[8:10], outputs[10:]
    assert all(view.shape == (3, 32, 32) for view in global_views)
    assert all(view.shape == (3, 8, 8) for view in local_views)
    assert all(grid.shape == (4, 4, 2) for grid in global_grids)
    assert all(grid.shape == (2, 2, 2) for grid in local_grids)
| 1,086 | 32.96875 | 87 | py |
lightly | lightly-master/tests/utils/__init__.py | 0 | 0 | 0 | py | |
lightly | lightly-master/tests/utils/test_debug.py | import math
import unittest
import numpy as np
import torch
from PIL import Image
from lightly.data import collate
from lightly.utils import debug
try:
import matplotlib.pyplot as plt
MATPLOTLIB_AVAILABLE = True
except ImportError:
MATPLOTLIB_AVAILABLE = False
BATCH_SIZE = 10
DIMENSION = 10
class TestDebug(unittest.TestCase):
    """Tests for lightly.utils.debug: embedding std statistics and plot helpers."""
    def _generate_random_image(self, w: int, h: int, c: int):
        # Helper: build a random RGB PIL image of width w, height h, c channels.
        array = np.random.rand(h, w, c) * 255
        image = Image.fromarray(array.astype("uint8")).convert("RGB")
        return image
    def test_std_of_l2_normalized_collapsed(self):
        # A constant embedding matrix must yield std 0 after l2 normalization.
        z = torch.ones(BATCH_SIZE, DIMENSION)  # collapsed output
        self.assertEqual(debug.std_of_l2_normalized(z), 0.0)
    def test_std_of_l2_normalized_uniform(self, eps: float = 1e-5):
        # An identity matrix yields std close to 1/sqrt(num_columns).
        z = torch.eye(BATCH_SIZE)
        self.assertLessEqual(
            abs(debug.std_of_l2_normalized(z) - 1 / math.sqrt(z.shape[1])),
            eps,
        )
    def test_std_of_l2_normalized_raises(self):
        # Only 2d inputs are accepted: 1d and 3d tensors raise ValueError.
        z = torch.zeros(BATCH_SIZE)
        with self.assertRaises(ValueError):
            debug.std_of_l2_normalized(z)
        z = torch.zeros(BATCH_SIZE, BATCH_SIZE, DIMENSION)
        with self.assertRaises(ValueError):
            debug.std_of_l2_normalized(z)
    @unittest.skipUnless(MATPLOTLIB_AVAILABLE, "Matplotlib not installed")
    def test_plot_augmented_images_image_collate_function(self):
        # simclr collate function is a subclass of the image collate function
        collate_function = collate.SimCLRCollateFunction()
        for n_images in range(2, 10):
            with self.subTest():
                images = [
                    self._generate_random_image(100, 100, 3) for _ in range(n_images)
                ]
                fig = debug.plot_augmented_images(images, collate_function)
                self.assertIsNotNone(fig)
    @unittest.skipUnless(MATPLOTLIB_AVAILABLE, "Matplotlib not installed")
    def test_plot_augmented_images_multi_view_collate_function(self):
        # dino collate function is a subclass of the multi view collate function
        collate_function = collate.DINOCollateFunction()
        for n_images in range(1, 10):
            with self.subTest():
                images = [
                    self._generate_random_image(100, 100, 3) for _ in range(n_images)
                ]
                fig = debug.plot_augmented_images(images, collate_function)
                self.assertIsNotNone(fig)
    @unittest.skipUnless(MATPLOTLIB_AVAILABLE, "Matplotlib not installed")
    def test_plot_augmented_images_no_images(self):
        # An empty image list must be rejected with a ValueError.
        collate_function = collate.SimCLRCollateFunction()
        with self.assertRaises(ValueError):
            debug.plot_augmented_images([], collate_function)
    @unittest.skipUnless(MATPLOTLIB_AVAILABLE, "Matplotlib not installed")
    def test_plot_augmented_images_invalid_collate_function(self):
        # Passing a non-collate object (None) must be rejected with a ValueError.
        images = [self._generate_random_image(100, 100, 3)]
        with self.assertRaises(ValueError):
            debug.plot_augmented_images(images, None)
| 3,075 | 35.619048 | 85 | py |
lightly | lightly-master/tests/utils/test_dist.py | import unittest
from unittest import mock
import torch
from lightly.utils import dist
class TestDist(unittest.TestCase):
    """Tests for lightly.utils.dist.eye_rank in single- and multi-process setups."""
    def test_eye_rank_undist(self):
        # Without distributed training, eye_rank(n) is a plain identity matrix.
        self.assertTrue(torch.all(dist.eye_rank(3) == torch.eye(3)))
    def test_eye_rank_dist(self):
        # With distributed training mocked on, eye_rank(n) must return an
        # (n, n * world_size) matrix whose identity block sits at the column
        # offset corresponding to the current rank; all other blocks are zero.
        n = 3
        zeros = torch.zeros((n, n)).bool()
        eye = torch.eye(n).bool()
        for world_size in [1, 3]:
            for rank in range(0, world_size):
                with mock.patch(
                    "torch.distributed.is_initialized", lambda: True
                ), mock.patch(
                    "lightly.utils.dist.world_size", lambda: world_size
                ), mock.patch(
                    "lightly.utils.dist.rank", lambda: rank
                ):
                    # Build the expected block layout: `rank` zero blocks,
                    # then the identity block, then the remaining zero blocks.
                    expected = []
                    for _ in range(0, rank):
                        expected.append(zeros)
                    expected.append(eye)
                    for _ in range(rank, world_size - 1):
                        expected.append(zeros)
                    expected = torch.cat(expected, dim=1)
                    self.assertTrue(torch.all(dist.eye_rank(n) == expected))
| 1,158 | 33.088235 | 76 | py |
lightly | lightly-master/tests/utils/test_io.py | import csv
import json
import sys
import tempfile
import unittest
import numpy as np
from lightly.utils.io import (
check_embeddings,
check_filenames,
save_custom_metadata,
save_embeddings,
save_schema,
save_tasks,
)
from tests.api_workflow.mocked_api_workflow_client import (
MockedApiWorkflowClient,
MockedApiWorkflowSetup,
)
class TestCLICrop(MockedApiWorkflowSetup):
    """I/O helper tests that run against the mocked API workflow setup."""

    def test_save_metadata(self):
        """Custom metadata should be written to a JSON file without errors."""
        entries = [("filename.jpg", {"random_metadata": 42})]
        out_path = tempfile.mktemp(".json", "metadata")
        save_custom_metadata(out_path, entries)

    def test_valid_filenames(self):
        """check_filenames accepts clean names and rejects ones containing commas."""
        good = "img.png"
        bad = "img,1.png"
        cases = [
            ([good], True),
            ([good, good], True),
            ([bad], False),
            ([good, bad], False),
        ]
        for filenames, expected_ok in cases:
            with self.subTest(msg=f"filenames:{filenames}"):
                if expected_ok:
                    check_filenames(filenames)
                else:
                    with self.assertRaises(ValueError):
                        check_filenames(filenames)
class TestEmbeddingsIO(unittest.TestCase):
    """Tests for validating embedding CSV files and saving task/schema JSON."""
    def setUp(self):
        # correct embedding file as created through lightly
        # NOTE(review): tempfile.mktemp is deprecated (race-prone); the file is
        # immediately (re)written below, so it is acceptable in this test.
        self.embeddings_path = tempfile.mktemp(".csv", "embeddings")
        embeddings = np.random.rand(32, 2)
        labels = [0 for i in range(len(embeddings))]
        filenames = [f"img_{i}.jpg" for i in range(len(embeddings))]
        save_embeddings(self.embeddings_path, embeddings, labels, filenames)
    def test_valid_embeddings(self):
        # The file written by setUp must pass validation unchanged.
        check_embeddings(self.embeddings_path)
    def test_whitespace_in_embeddings(self):
        # should fail because there are whitespaces in the header columns
        lines = [
            "filenames, embedding_0,embedding_1,labels\n",
            "img_1.jpg, 0.351,0.1231",
        ]
        with open(self.embeddings_path, "w") as f:
            f.writelines(lines)
        with self.assertRaises(RuntimeError) as context:
            check_embeddings(self.embeddings_path)
        self.assertTrue("must not contain whitespaces" in str(context.exception))
    def test_no_labels_in_embeddings(self):
        # should fail because there is no `labels` column in the header
        lines = ["filenames,embedding_0,embedding_1\n", "img_1.jpg,0.351,0.1231"]
        with open(self.embeddings_path, "w") as f:
            f.writelines(lines)
        with self.assertRaises(RuntimeError) as context:
            check_embeddings(self.embeddings_path)
        self.assertTrue("has no `labels` column" in str(context.exception))
    def test_no_empty_rows_in_embeddings(self):
        # should fail because there are empty rows in the embeddings file
        # (the adjacent string literals below are intentionally concatenated
        # so that "\n\n" produces an empty row between the two data rows)
        lines = [
            "filenames,embedding_0,embedding_1,labels\n",
            "img_1.jpg,0.351,0.1231\n\n" "img_2.jpg,0.311,0.6231",
        ]
        with open(self.embeddings_path, "w") as f:
            f.writelines(lines)
        with self.assertRaises(RuntimeError) as context:
            check_embeddings(self.embeddings_path)
        self.assertTrue("must not have empty rows" in str(context.exception))
    def test_embeddings_extra_rows(self):
        # Extra columns after `labels` must be stripped when
        # remove_additional_columns=True.
        rows = [
            ["filenames", "embedding_0", "embedding_1", "labels", "selected", "masked"],
            ["image_0.jpg", "3.4", "0.23", "0", "1", "0"],
            ["image_1.jpg", "3.4", "0.23", "1", "0", "1"],
        ]
        with open(self.embeddings_path, "w") as f:
            csv_writer = csv.writer(f)
            csv_writer.writerows(rows)
        check_embeddings(self.embeddings_path, remove_additional_columns=True)
        with open(self.embeddings_path) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=",")
            for row_read, row_original in zip(csv_reader, rows):
                self.assertListEqual(row_read, row_original[:-2])
    def test_embeddings_extra_rows_special_order(self):
        # Additional columns interleaved with `labels` must be dropped while
        # preserving the filenames/embeddings/labels columns.
        input_rows = [
            ["filenames", "embedding_0", "embedding_1", "masked", "labels", "selected"],
            ["image_0.jpg", "3.4", "0.23", "0", "1", "0"],
            ["image_1.jpg", "3.4", "0.23", "1", "0", "1"],
        ]
        correct_output_rows = [
            ["filenames", "embedding_0", "embedding_1", "labels"],
            ["image_0.jpg", "3.4", "0.23", "1"],
            ["image_1.jpg", "3.4", "0.23", "0"],
        ]
        with open(self.embeddings_path, "w") as f:
            csv_writer = csv.writer(f)
            csv_writer.writerows(input_rows)
        check_embeddings(self.embeddings_path, remove_additional_columns=True)
        with open(self.embeddings_path) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=",")
            for row_read, row_original in zip(csv_reader, correct_output_rows):
                self.assertListEqual(row_read, row_original)
    def test_save_tasks(self):
        # Tasks are serialized as a JSON list and must round-trip unchanged.
        tasks = [
            "task1",
            "task2",
            "task3",
        ]
        with tempfile.NamedTemporaryFile(suffix=".json") as file:
            save_tasks(file.name, tasks)
            with open(file.name, "r") as f:
                loaded = json.load(f)
        self.assertListEqual(tasks, loaded)
    def test_save_schema(self):
        # Schema files pair category ids with names under a task_type key.
        description = "classification"
        ids = [1, 2, 3, 4]
        names = ["name1", "name2", "name3", "name4"]
        expected_format = {
            "task_type": "classification",
            "categories": [
                {"id": 1, "name": "name1"},
                {"id": 2, "name": "name2"},
                {"id": 3, "name": "name3"},
                {"id": 4, "name": "name4"},
            ],
        }
        with tempfile.NamedTemporaryFile(suffix=".json") as file:
            save_schema(file.name, description, ids, names)
            with open(file.name, "r") as f:
                loaded = json.load(f)
        # NOTE(review): sorted() on dicts compares top-level keys only, so this
        # assertion does not verify the categories' contents in depth.
        self.assertListEqual(sorted(expected_format), sorted(loaded))
    def test_save_schema_different(self):
        # Mismatched lengths of ids and names must raise before any file I/O.
        with self.assertRaises(ValueError):
            save_schema(
                "name_doesnt_matter",
                "description_doesnt_matter",
                [1, 2],
                ["name1"],
            )
| 6,311 | 36.129412 | 88 | py |
lightly | lightly-master/tests/utils/test_scheduler.py | import unittest
import torch
from torch import nn
from lightly.utils.scheduler import CosineWarmupScheduler, cosine_schedule
class TestScheduler(unittest.TestCase):
    """Tests for cosine_schedule and CosineWarmupScheduler."""
    def test_cosine_schedule(self):
        # Spot-check interpolated values between start_value and end_value.
        self.assertAlmostEqual(cosine_schedule(1, 10, 0.99, 1.0), 0.99030154, 6)
        self.assertAlmostEqual(cosine_schedule(95, 100, 0.7, 2.0), 1.99477063, 6)
        self.assertAlmostEqual(cosine_schedule(0, 1, 0.996, 1.0), 1.0, 6)
        self.assertAlmostEqual(cosine_schedule(10, 10, 0.0, 1.0), 1.0, 6)
        # Negative steps and non-positive max_steps are invalid.
        with self.assertRaises(ValueError):
            cosine_schedule(-1, 1, 0.0, 1.0)
        with self.assertRaises(ValueError):
            cosine_schedule(0, 0, 0.0, 1.0)
        with self.assertRaises(ValueError):
            cosine_schedule(1, 0, 0.0, 1.0)
        # Exceeding max_steps only warns, it does not raise.
        # NOTE(review): assertWarns' `msg` argument is the failure message shown
        # when no warning is raised; it does not match against the warning text.
        with self.assertWarns(
            RuntimeWarning, msg="Current step number 11 exceeds max_steps 10."
        ):
            cosine_schedule(11, 10, 0.0, 1.0)
    def test_CosineWarmupScheduler(self):
        # The exact interleaving of scheduler.step()/optimizer.step() below is
        # intentional: each assertion checks the lr for the upcoming epoch.
        model = nn.Linear(10, 1)
        optimizer = torch.optim.SGD(
            model.parameters(), lr=1.0, momentum=0.0, weight_decay=0.0
        )
        scheduler = CosineWarmupScheduler(
            optimizer, warmup_epochs=3, max_epochs=6, verbose=True, end_value=0.0
        )
        # warmup: lr ramps linearly (1/3, 2/3, 1.0) to the base lr
        self.assertAlmostEqual(scheduler.get_last_lr()[0], 0.333333333)
        scheduler.step()
        optimizer.step()
        self.assertAlmostEqual(scheduler.get_last_lr()[0], 0.666666666)
        scheduler.step()
        optimizer.step()
        self.assertAlmostEqual(scheduler.get_last_lr()[0], 1.0)
        scheduler.step()
        optimizer.step()
        # cosine decay from the base lr down to end_value
        self.assertAlmostEqual(scheduler.get_last_lr()[0], 1.0)
        scheduler.step()
        optimizer.step()
        self.assertAlmostEqual(scheduler.get_last_lr()[0], 0.5)
        scheduler.step()
        optimizer.step()
        self.assertAlmostEqual(scheduler.get_last_lr()[0], 0.0)
        # extra step for Pytorch Lightning
        scheduler.step()
        optimizer.step()
        self.assertAlmostEqual(scheduler.get_last_lr()[0], 0.0)
        # step > max_epochs
        with self.assertWarns(
            RuntimeWarning, msg="Current step number 7 exceeds max_steps 6."
        ):
            scheduler.step()
| 2,304 | 34.461538 | 81 | py |
lightly | lightly-master/tests/utils/test_version_compare.py | import unittest
from lightly.utils import version_compare
class TestVersionCompare(unittest.TestCase):
    """Tests for semantic version comparison in lightly.utils.version_compare."""

    def test_valid_versions(self):
        """Well-formed x.y.z versions compare as -1 (smaller), 1 (bigger), 0 (equal)."""
        expectations = [
            # general smaller-than cases
            ("0.1.4", "1.2.0", -1),
            ("1.1.0", "1.2.0", -1),
            # bigger-than cases
            ("1.2.0", "1.1.0", 1),
            ("1.2.0", "0.1.4", 1),
            # equality
            ("1.2.0", "1.2.0", 0),
        ]
        for first, second, expected in expectations:
            self.assertEqual(version_compare.version_compare(first, second), expected)

    def test_invalid_versions(self):
        """Versions that are not exactly three numeric parts raise ValueError."""
        bad_pairs = [
            ("1.2", "1.1.0"),
            ("1.2.0.1", "1.1.0"),
            # same minor version but with dev/rc suffixes
            ("1.0.7", "1.1.0.dev1"),
            ("1.1.0.dev1", "1.1.0rc1"),
        ]
        for first, second in bad_pairs:
            with self.assertRaises(ValueError):
                version_compare.version_compare(first, second)
| 1,259 | 36.058824 | 88 | py |
lightly | lightly-master/tests/utils/benchmarking/__init__.py | 0 | 0 | 0 | py | |
lightly | lightly-master/tests/utils/benchmarking/test_benchmark_module.py | import unittest
import torch
from pytorch_lightning import Trainer
from torch.nn import CrossEntropyLoss, Flatten, Linear, Sequential
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import FakeData
from torchvision.transforms import ToTensor
from lightly.data import LightlyDataset
from lightly.utils.benchmarking import BenchmarkModule
class TestBenchmarkModule:
    """Tests the kNN accuracy tracking of BenchmarkModule during Trainer.fit."""
    def test(self, accelerator: str = "cpu") -> None:
        # With identical train/val data and knn_k=1 every query matches itself,
        # so the tracked accuracy must reach 1.0.
        torch.manual_seed(0)
        dataset = LightlyDataset.from_torch_dataset(
            FakeData(
                size=10, image_size=(3, 32, 32), num_classes=2, transform=ToTensor()
            )
        )
        dataloader = DataLoader(dataset, batch_size=2)
        model = _DummyModel(dataloader_kNN=dataloader)
        trainer = Trainer(max_epochs=2, accelerator=accelerator)
        trainer.fit(
            model,
            train_dataloaders=dataloader,
            val_dataloaders=dataloader,
        )
        assert model.max_accuracy == 1.0  # accuracy is 1.0 because knn_k=1
    @unittest.skipUnless(torch.cuda.is_available(), "Cuda not available.")
    def test_cuda(self) -> None:
        # Re-run the default test on GPU.
        self.test(accelerator="cuda")
    def test_knn_train_val(self) -> None:
        # Distinct train/val data (via random_offset) should keep accuracy < 1.
        torch.manual_seed(0)
        dataset_train = LightlyDataset.from_torch_dataset(
            FakeData(
                size=10, image_size=(3, 32, 32), num_classes=2, transform=ToTensor()
            )
        )
        dataloader_train = DataLoader(dataset_train, batch_size=2)
        dataset_val = LightlyDataset.from_torch_dataset(
            FakeData(
                size=10,
                image_size=(3, 32, 32),
                num_classes=2,
                transform=ToTensor(),
                random_offset=10,
            )
        )
        dataloader_val = DataLoader(dataset_val, batch_size=2)
        model = _DummyModel(dataloader_kNN=dataloader_train, knn_k=3)
        trainer = Trainer(max_epochs=2)
        trainer.fit(
            model,
            train_dataloaders=dataloader_train,
            val_dataloaders=dataloader_val,
        )
        assert (
            model.max_accuracy < 1.0
        )  # accuracy is <1.0 because train val are different
class _DummyModel(BenchmarkModule):
    """Minimal two-class benchmark model: flatten + linear backbone with CE loss."""

    def __init__(self, dataloader_kNN, knn_k=1):
        super().__init__(dataloader_kNN, num_classes=2, knn_k=knn_k)
        # Tiny backbone mapping flattened 3x32x32 images to two logits.
        self.backbone = Sequential(Flatten(), Linear(3 * 32 * 32, 2))
        self.criterion = CrossEntropyLoss()

    def training_step(self, batch, batch_idx):
        images, targets, _ = batch
        logits = self.backbone(images)
        return self.criterion(logits, targets)

    def configure_optimizers(self):
        return SGD(self.backbone.parameters(), lr=0.1)
| 2,840 | 32.034884 | 84 | py |
lightly | lightly-master/tests/utils/benchmarking/test_knn.py | import torch
import torch.nn.functional as F
from lightly.utils.benchmarking import knn
def test_knn() -> None:
    """knn_predict ranks all classes by similarity of the query to bank features."""
    # Four bank features, one per class, stored as columns (dim x num_samples).
    bank = torch.tensor(
        [
            [1.0, 1.0, 1.0],
            [-1.0, 1.0, 1.0],
            [-1.0, -1.0, 1.0],
            [-1.0, -1.0, -1.0],
        ]
    ).t()
    bank_labels = torch.tensor([0, 1, 2, 3])
    queries = torch.tensor(
        [
            [1.0, 1.0, 1.0],
            [-1.0, 1.1, -1.0],
        ]
    )
    bank = F.normalize(bank, dim=0)
    queries = F.normalize(queries, dim=1)
    predictions = knn.knn_predict(
        feature=queries,
        feature_bank=bank,
        feature_labels=bank_labels,
        num_classes=4,
        knn_k=4,
    )
    # Each row lists the four classes ordered from closest to farthest.
    assert predictions.tolist() == [
        [0, 1, 2, 3],
        [1, 3, 0, 2],
    ]
def test_knn__knn_k() -> None:
    """With knn_k=2 the class ranking reflects votes from the two nearest neighbors."""
    # Bank columns alternate between the two classes 0 and 1.
    bank = torch.tensor(
        [
            [1.0, 1.0, 1.0],
            [-1.0, 1.0, 1.0],
            [-1.0, -1.0, 1.0],
            [-1.0, -1.0, -1.0],
        ]
    ).t()
    bank_labels = torch.tensor([0, 1, 0, 1])
    queries = torch.tensor(
        [
            [1.0, 1.0, 1.0],
            [-1.0, 1.1, -1.0],
        ]
    )
    bank = F.normalize(bank, dim=0)
    queries = F.normalize(queries, dim=1)
    predictions = knn.knn_predict(
        feature=queries,
        feature_bank=bank,
        feature_labels=bank_labels,
        num_classes=4,
        knn_k=2,
    )
    assert predictions.tolist() == [
        [0, 1, 2, 3],
        # 1 is first because bank features with index 1 and 3 and label 1 are closest.
        # 0 is second because bank features with index 0 and 2 and label 0 are 2nd closest.
        # 2 and 3 are last because there are no bank features with label 2 or 3.
        [1, 0, 2, 3],
    ]
| 1,832 | 25.185714 | 91 | py |
lightly | lightly-master/tests/utils/benchmarking/test_knn_classifier.py | from typing import Tuple
import pytest
import torch
from pytorch_lightning import Trainer
from torch import Tensor, nn
from torch.utils.data import DataLoader, Dataset
from lightly.utils.benchmarking import KNNClassifier
class TestKNNClassifier:
    """Tests for the Lightning-based KNNClassifier benchmark helper."""
    def test(self) -> None:
        # Exact top-k accuracies for a hand-crafted 4-class setup.
        # Define 4 training points from 4 classes.
        train_features = torch.tensor(
            [
                [0.0, -1.0],
                [0.0, 1.0],
                [1.0, 0.0],
                [1.0, 1.0],
            ]
        )
        train_targets = torch.tensor([0, 1, 2, 3])
        train_dataset = _FeaturesDataset(features=train_features, targets=train_targets)
        # Define 3 validation points.
        # Their expected predicted labels are their closest training points in order.
        val_features = torch.tensor(
            [
                [0.0, -0.4],  # predicted_labels = [0, 1, 2, 3]
                [0.6, 0.7],  # predicted_labels = [3, 1, 2, 0]
                [0.6, 0.3],  # predicted_labels = [2, 3, 1, 0]
            ]
        )
        val_targets = torch.tensor([0, 0, 1])
        val_dataset = _FeaturesDataset(features=val_features, targets=val_targets)
        train_dataloader = DataLoader(train_dataset, batch_size=2)
        val_dataloader = DataLoader(val_dataset, batch_size=2)
        # Run KNN classifier.
        model = nn.Identity()
        classifier = KNNClassifier(model, num_classes=4, knn_k=3, topk=(1, 2, 3, 4))
        trainer = Trainer(max_epochs=1, accelerator="cpu", devices=1)
        trainer.fit(
            model=classifier,
            train_dataloaders=train_dataloader,
            val_dataloaders=val_dataloader,
        )
        assert trainer.callback_metrics["val_top1"].item() == pytest.approx(1 / 3)
        assert trainer.callback_metrics["val_top2"].item() == pytest.approx(1 / 3)
        assert trainer.callback_metrics["val_top3"].item() == pytest.approx(2 / 3)
        assert trainer.callback_metrics["val_top4"].item() == pytest.approx(3 / 3)
    def test__cpu(self) -> None:
        self._test__accelerator(accelerator="cpu", expected_device="cpu")
    @pytest.mark.skipif(not torch.cuda.is_available(), reason="No cuda available")
    def test__cuda(self) -> None:
        self._test__accelerator(accelerator="gpu", expected_device="cuda")
    def _test__accelerator(self, accelerator: str, expected_device: str) -> None:
        # Smoke test on random data; also checks the classifier's internal
        # feature-bank buffers end up on the expected device with float32 dtype.
        torch.manual_seed(0)
        model = nn.Linear(3, 2)
        classifier = KNNClassifier(model, num_classes=10, knn_k=20)
        trainer = Trainer(max_epochs=1, accelerator=accelerator, devices=1)
        train_features = torch.randn(40, 3)
        train_targets = torch.randint(0, 10, (40,))
        train_dataset = _FeaturesDataset(features=train_features, targets=train_targets)
        val_features = torch.randn(10, 3)
        val_targets = torch.randint(0, 10, (10,))
        val_dataset = _FeaturesDataset(features=val_features, targets=val_targets)
        train_dataloader = DataLoader(train_dataset, batch_size=3)
        val_dataloader = DataLoader(val_dataset, batch_size=3)
        trainer.fit(
            model=classifier,
            train_dataloaders=train_dataloader,
            val_dataloaders=val_dataloader,
        )
        assert trainer.callback_metrics["val_top1"].item() >= 0.0
        assert (
            trainer.callback_metrics["val_top5"].item()
            >= trainer.callback_metrics["val_top1"].item()
        )
        assert trainer.callback_metrics["val_top5"].item() <= 1.0
        # The per-batch feature lists are emptied once merged into the tensors.
        assert classifier._train_features == []
        assert classifier._train_targets == []
        assert classifier._train_features_tensor is not None
        assert classifier._train_targets_tensor is not None
        assert classifier._train_features_tensor.shape == (2, 40)
        assert classifier._train_targets_tensor.shape == (40,)
        assert classifier._train_features_tensor.dtype == torch.float32
        assert classifier._train_features_tensor.device.type == expected_device
        assert classifier._train_targets_tensor.device.type == expected_device
    def test__features_dtype(self) -> None:
        model = nn.Identity()
        # Set feature_dtype to torch.int to test if classifier correctly changes dtype.
        # We cannot test for torch.float16 because it is not supported on cpu.
        classifier = KNNClassifier(
            model, num_classes=10, knn_k=3, feature_dtype=torch.int
        )
        trainer = Trainer(max_epochs=1, accelerator="cpu", devices=1)
        train_features = torch.randn(4, 3)
        train_targets = torch.randint(0, 10, (4,))
        train_dataset = _FeaturesDataset(features=train_features, targets=train_targets)
        val_features = torch.randn(4, 3)
        val_targets = torch.randint(0, 10, (4,))
        val_dataset = _FeaturesDataset(features=val_features, targets=val_targets)
        train_dataloader = DataLoader(train_dataset)
        val_dataloader = DataLoader(val_dataset)
        trainer.fit(
            model=classifier,
            train_dataloaders=train_dataloader,
            val_dataloaders=val_dataloader,
        )
        assert classifier._train_features_tensor is not None
        assert classifier._train_features_tensor.dtype == torch.int
class _FeaturesDataset(Dataset):
def __init__(self, features: Tensor, targets) -> None:
super().__init__()
self.features = features
self.targets = targets
def __getitem__(self, index: int) -> Tuple[Tensor, Tensor]:
return self.features[index], self.targets[index]
def __len__(self) -> int:
return len(self.features)
| 5,630 | 41.659091 | 88 | py |
lightly | lightly-master/tests/utils/benchmarking/test_linear_classifier.py | import torch
from pytorch_lightning import Trainer
from torch import nn
from torch.utils.data import DataLoader
from torchvision.datasets import FakeData
from torchvision.transforms import ToTensor
from lightly.utils.benchmarking import LinearClassifier
class TestLinearClassifier:
    """Tests for LinearClassifier in finetune and linear evaluation modes."""
    def test__finetune(self) -> None:
        """Test the classifier in a finetune evaluation setting.
        The test verifies that the model and classification head are updated.
        """
        torch.manual_seed(0)
        dataset = FakeData(
            size=10, image_size=(3, 8, 8), num_classes=5, transform=ToTensor()
        )
        train_dataloader = DataLoader(dataset, batch_size=2)
        val_dataloader = DataLoader(dataset, batch_size=2)
        linear = nn.Linear(3 * 8 * 8, 4)
        model = nn.Sequential(nn.Flatten(), linear)
        initial_weights = linear.weight.clone()
        linear_classifier = LinearClassifier(
            model=model,
            batch_size_per_device=2,
            feature_dim=4,
            num_classes=5,
            freeze_model=False,  # Don't freeze the model for finetuning.
        )
        initial_head_weights = linear_classifier.classification_head.weight.clone()
        trainer = Trainer(max_epochs=1, accelerator="cpu", devices=1)
        trainer.fit(linear_classifier, train_dataloader, val_dataloader)
        # Logged metrics must be valid: losses positive, top5 >= top1, both in [0, 1].
        assert trainer.callback_metrics["train_loss"].item() > 0
        assert trainer.callback_metrics["train_top1"].item() >= 0
        assert (
            trainer.callback_metrics["train_top5"].item()
            >= trainer.callback_metrics["train_top1"].item()
        )
        assert trainer.callback_metrics["train_top5"].item() <= 1
        assert trainer.callback_metrics["val_loss"].item() > 0
        assert trainer.callback_metrics["val_top1"].item() >= 0
        assert (
            trainer.callback_metrics["val_top5"].item()
            >= trainer.callback_metrics["val_top1"].item()
        )
        assert trainer.callback_metrics["val_top5"].item() <= 1
        # Verify that weights were updated.
        assert not torch.all(torch.eq(initial_weights, linear.weight))
        # Verify that head weights were updated.
        assert not torch.all(
            torch.eq(initial_head_weights, linear_classifier.classification_head.weight)
        )
    def test__linear(self) -> None:
        """Test the classifier in a linear evaluation setting.
        The test verifies that only the classification head is updated and the model
        remains unchanged.
        """
        torch.manual_seed(0)
        dataset = FakeData(
            size=10, image_size=(3, 8, 8), num_classes=5, transform=ToTensor()
        )
        train_dataloader = DataLoader(dataset, batch_size=2)
        val_dataloader = DataLoader(dataset, batch_size=2)
        linear = nn.Linear(3 * 8 * 8, 4)
        model = nn.Sequential(nn.Flatten(), linear)
        initial_weights = linear.weight.clone()
        linear_classifier = LinearClassifier(
            model=model,
            batch_size_per_device=2,
            feature_dim=4,
            num_classes=5,
            freeze_model=True,  # Freeze the model for linear evaluation.
        )
        initial_head_weights = linear_classifier.classification_head.weight.clone()
        trainer = Trainer(max_epochs=1, accelerator="cpu", devices=1)
        trainer.fit(linear_classifier, train_dataloader, val_dataloader)
        assert trainer.callback_metrics["train_loss"].item() > 0
        assert trainer.callback_metrics["train_top1"].item() >= 0
        assert (
            trainer.callback_metrics["train_top5"].item()
            >= trainer.callback_metrics["train_top1"].item()
        )
        assert trainer.callback_metrics["train_top5"].item() <= 1
        assert trainer.callback_metrics["val_loss"].item() > 0
        assert trainer.callback_metrics["val_top1"].item() >= 0
        assert (
            trainer.callback_metrics["val_top5"].item()
            >= trainer.callback_metrics["val_top1"].item()
        )
        assert trainer.callback_metrics["val_top5"].item() <= 1
        # Verify that model weights were not updated.
        assert torch.all(torch.eq(initial_weights, linear.weight))
        # Verify that head weights were updated.
        assert not torch.all(
            torch.eq(initial_head_weights, linear_classifier.classification_head.weight)
        )
| 4,403 | 41.346154 | 88 | py |
lightly | lightly-master/tests/utils/benchmarking/test_metric_callback.py | import torch
from pytorch_lightning import LightningModule, Trainer
from torch.utils.data import DataLoader
from torchvision.datasets import FakeData
from torchvision.transforms import ToTensor
from lightly.utils.benchmarking import MetricCallback
class TestMetricCallback:
    """MetricCallback should record one logged value per epoch for train and val."""

    def test(self) -> None:
        recorder = MetricCallback()
        fake_images = FakeData(
            size=10, image_size=(3, 32, 32), num_classes=10, transform=ToTensor()
        )
        loader_train = DataLoader(fake_images, batch_size=2)
        loader_val = DataLoader(fake_images, batch_size=2)
        trainer = Trainer(accelerator="cpu", callbacks=[recorder], max_epochs=3)
        trainer.fit(
            _DummyModule(),
            train_dataloaders=loader_train,
            val_dataloaders=loader_val,
        )
        # The dummy module logs the epoch number during training ...
        assert recorder.train_metrics["train_epoch"] == [0, 1, 2]
        assert recorder.train_metrics["train_epoch_dict"] == [0, 1, 2]
        # ... and twice the epoch number during validation.
        assert recorder.val_metrics["val_epoch"] == [0, 2, 4]
        assert recorder.val_metrics["val_epoch_dict"] == [0, 2, 4]
class _DummyModule(LightningModule):
    """Lightning module that logs the current epoch so MetricCallback can record it."""

    def __init__(self) -> None:
        super().__init__()

    def training_step(self, batch, batch_idx) -> None:
        epoch = self.trainer.current_epoch
        self.log("train_epoch", epoch)
        self.log_dict({"train_epoch_dict": epoch})

    def validation_step(self, batch, batch_idx) -> None:
        doubled = self.trainer.current_epoch * 2
        self.log("val_epoch", doubled)
        self.log_dict({"val_epoch_dict": doubled})

    def configure_optimizers(self) -> None:
        return None
| 1,673 | 36.2 | 81 | py |
lightly | lightly-master/tests/utils/benchmarking/test_online_linear_classifier.py | import pytest
import torch
from pytorch_lightning import LightningModule, Trainer
from torch import Tensor, nn
from torch.optim import SGD
from torch.utils.data import DataLoader
from torchvision.datasets import FakeData
from torchvision.transforms import ToTensor
from lightly.utils.benchmarking import OnlineLinearClassifier
class TestOnlineLinearClassifier:
    """End-to-end check that OnlineLinearClassifier logs sane train/val metrics."""

    def test__cpu(self) -> None:
        self._test(accelerator="cpu")

    @pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA not available")
    def test__cuda(self) -> None:
        self._test(accelerator="gpu")

    def _test(self, accelerator: str) -> None:
        fake_images = FakeData(
            size=10, image_size=(3, 8, 8), num_classes=5, transform=ToTensor()
        )
        loader_train = DataLoader(fake_images, batch_size=2)
        loader_val = DataLoader(fake_images, batch_size=2)
        trainer = Trainer(
            max_epochs=1, accelerator=accelerator, devices=1, log_every_n_steps=1
        )
        trainer.fit(
            model=_DummyModule(),
            train_dataloaders=loader_train,
            val_dataloaders=loader_val,
        )
        metrics = trainer.callback_metrics
        # For both phases: loss and top-1 are non-negative, and top-5 must
        # dominate top-1 while remaining a valid accuracy in [0, 1].
        for phase in ("train", "val"):
            loss = metrics[f"{phase}_online_cls_loss"].item()
            top1 = metrics[f"{phase}_online_cls_top1"].item()
            top5 = metrics[f"{phase}_online_cls_top5"].item()
            assert loss >= 0
            assert top1 >= 0
            assert top5 >= top1
            assert top5 <= 1
class _DummyModule(LightningModule):
    """Minimal module wiring a flatten+linear encoder to an OnlineLinearClassifier."""

    def __init__(self) -> None:
        super().__init__()
        self.linear = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 3))
        self.online_classifier = OnlineLinearClassifier(feature_dim=3, num_classes=5)

    def training_step(self, batch, batch_idx) -> Tensor:
        images, targets = batch[0], batch[1]
        loss, log_dict = self.online_classifier.training_step(
            (self.linear(images), targets), batch_idx
        )
        self.log_dict(log_dict)
        return loss

    def validation_step(self, batch, batch_idx) -> Tensor:
        images, targets = batch[0], batch[1]
        loss, log_dict = self.online_classifier.validation_step(
            (self.linear(images), targets), batch_idx
        )
        self.log_dict(log_dict)
        return loss

    def configure_optimizers(self) -> SGD:
        return SGD(self.parameters(), lr=0.1)
| 2,948 | 36.807692 | 85 | py |
lightly | lightly-master/tests/utils/benchmarking/test_topk.py | import torch
from lightly.utils.benchmarking import topk
def test_mean_topk_accuracy() -> None:
    """mean_topk_accuracy averages, over samples, whether the target is in the top-k."""
    predictions = torch.tensor(
        [
            [1, 2, 3, 4],
            [4, 1, 10, 0],
            [3, 1, 5, 8],
        ]
    )
    labels = torch.tensor([1, 10, 8])
    expected = {1: 1 / 3, 2: 1 / 3, 3: 2 / 3, 4: 1.0, 5: 1.0}
    assert topk.mean_topk_accuracy(predictions, labels, k=(1, 2, 3, 4, 5)) == expected
| 460 | 19.954545 | 86 | py |
BioNEV | BioNEV-master/README.md | # BioNEV (Biomedical Network Embedding Evaluation)
## 1. Introduction
This repository contains source code and datasets for paper ["Graph Embedding on Biomedical Networks: Methods, Applications, and Evaluations"](https://arxiv.org/pdf/1906.05017.pdf) (accepted by **Bioinformatics**). This work aims to systematically evaluate recent advanced graph embedding techniques on biomedical tasks. We compile 5 benchmark datasets for 4 biomedical prediction tasks (see paper for details) and use them to evaluate 11 representative graph embedding methods selected from different categories:
- 5 matrix factorization-based: Laplacian Eigenmap, SVD, Graph Factorization, HOPE, GraRep
- 3 random walk-based: DeepWalk, node2vec, struc2vec
- 3 neural network-based: LINE, SDNE, GAE
The code can also be applied to graphs in other domains (e.g., social networks, citation networks). More experimental details can be found in [**Supplementary Materials**](Supplementary%20Materials.pdf).
Please kindly cite the paper if you use the code, datasets or any results in this repo or in the paper:
```
@article{yue2020graph,
title={Graph embedding on biomedical networks: methods, applications and evaluations},
author={Yue, Xiang and Wang, Zhen and Huang, Jingong and Parthasarathy, Srinivasan and Moosavinasab, Soheil and Huang, Yungui and Lin, Simon M and Zhang, Wen and Zhang, Ping and Sun, Huan},
journal={Bioinformatics},
volume={36},
number={4},
pages={1241--1251},
year={2020},
publisher={Oxford University Press}
}
```
## 2. Pipeline

Fig. 1: Pipeline for applying graph embedding methods to biomedical tasks. Low-dimensional node
representations are
first learned from biomedical networks by graph embedding methods and then used as features to build
specific classifiers for different tasks. For (a) matrix factorization-based methods, they use a data
matrix (e.g., adjacency matrix) as the input to learn embeddings through matrix factorization. For (b)
random walk-based methods, they first generate sequences of nodes through random walks and then feed the
sequences into the word2vec model to learn node representations. For (c)
neural network-based methods, their architectures and inputs vary from different models.
## 3. Dataset
Datasets used in the paper:
### Link Prediction
- [CTD DDA](data/CTD_DDA) : a drug-disease association graph extracted from [Comparative Toxicogenomics Database](http://ctdbase.org/downloads/)
- [NDFRT DDA](data/NDFRT_DDA) : a drug-disease association graph extracted from [UMLS National Drug File](https://www.nlm.nih.gov/research/umls/sourcereleasedocs/current/NDFRT/)
- [DrugBank DDi](data/DrugBank_DDI) : a drug-drug interaction graph extracted from [DrugBank database](https://www.drugbank.ca/)
- [STRING PPI](data/STRING_PPI) : a protein-protein interaction graph extracted from [STRING database](https://string-db.org/)
### Node Classification
- [Clin Term COOC](data/Clin_Term_COOC) : a medical term-term co-occurrence graph from (Finlayson et al., 2014) [[source data]](https://datadryad.org//resource/doi:10.5061/dryad.jp917), [[paper]](https://doi.org/10.1038/sdata.2014.32)
- [node2vec PPI](data/node2vec_PPI): a PPI graph with functional annotations used in [node2vec](https://snap.stanford.edu/node2vec/) (Grover and Leskovec, 2016)
- [Mashup PPI](data/Mashup_PPI): a experimental PPI graph with functional annotations used in [Mashup](http://cb.csail.mit.edu/cb/mashup/) (Cho et al., 2016)
Statistics:
| Task Type | Dataset | #nodes | #edges | Density | #labels |
|:-------------------:|:--------------:|:------:|:---------:|:-------:|:-------:|
| | CTD DDA | 12,765 | 92,813 | 0.11% | - |
| | NDFRT DDA | 13,545 | 56,515 | 0.06% | - |
| Link Prediction | DrugBank DDI | 2,191 | 242,027 | 10.08% | - |
| | STRING PPI | 15,131 | 359,776 | 0.31% | - |
| | Clin Term COOC | 48,651 | 1,659,249 | 0.14% | 31 |
| Node Classification | node2vec PPI | 3,890 | 76,584 | 1.01% | 50 |
| | Mashup PPI | 16,143 | 300,181 | 0.23% | 28 |
## 4. *Pre-trained Embeddings*
We also release the best-performing pre-trained representations of nodes (e.g., drugs, diseases, proteins, UMLS concepts) on each dataset.
These pre-trained vectors can be used as:
- External representations to complement the biological features.
In the paper, we showed that by adding the network embedding feature into an existing computational
model
for predicting drug-disease associations, the performance is further improved (Section 4.3 in the
paper).
- Initialized values of the embedding vectors before training. We can initialize the embedding vector
for each node on a graph
with its pre-trained embedding (e.g., by looking for the corresponding entity in pre-trained vocab
look-up table rather than by random
initialization, and then continue training various graph embedding methods as before (which is often
referred
to as “fine-tuning”). We conducted experiment with this "transfer learning" idea on
the "CTD DDA" graph and showed the improvement (Section 5 in the paper).
All the pretrained vectors can be downloaded [here](http://web.cse.ohio-state.edu/~yue.149/BioNEV/pretrained/).
The files are formatted as:
node_num, embedding_dimension
index_1, embedding vector 1
index_2, embedding vector 2
...
The corresponding index to node name (or their original ID) can be found in the each dataset directory.
## 5. Code
The graph embedding learning for Laplician Eigenmap, Graph Factorization, HOPE, GraRep, DeepWalk, node2vec, LINE, SDNE uses the code from [OpenNE](https://github.com/thunlp/OpenNE)
The code of [struc2vec](https://github.com/leoribeiro/struc2vec) and [GAE](https://github.com/tkipf/gae) is from their authors.
To ensure the different source code runs successfully in our framework, we modified parts of their source code.
#### Installation
Use the following command to install directly from GitHub;
```bash
$ pip install git+https://github.com/xiangyue9607/BioNEV.git
```
Alternatively, use the following commands to install the latest code in development mode (using `-e`):
```bash
$ git clone https://github.com/xiangyue9607/BioNEV.git
$ cd BioNEV
$ pip install -e .
```
#### General Options
- --input, input graph file. Only accepted edgelist format.
- --output, output graph embedding file.
- --task, choose to evaluate the embedding quality based on a specific prediction task (i.e., link-prediction, node-classification, none (no eval), default is none)
- --testing-ratio, testing set ratio for prediction tasks. Only applied when --task is not none. The default is 0.2
- --dimensions, the dimensions of embedding for each node. The default is 100.
- --method, the name of embedding method
- --label-file, the label file for node classification.
- --weighted, true if the input graph is weighted. The default is False.
- --eval-result-file, the filename of eval result (save the evaluation result into a file). Skip it if there is no need.
- --seed, random seed. The default is 0.
#### Specific Options
- Matrix Factorization-based methods:
- --kstep, k-step transition probability matrix for GraRep. The default is 4. It must divide the --dimension.
- --weight-decay, coefficient for L2 regularization for Graph Factorization. The default is 5e-4.
- --lr, learning rate for gradient descent in Graph Factorization. The default is 0.01.
- Random Walk-based methods:
- --number-walks, the number of random walks to start at each node.
- --walk-length, the length of the random walk started at each node.
- --window-size, window size of node sequence.
- --p, --q, two parameters that control how fast the walk explores and leaves the neighborhood of starting node. The default values of p, q are 1.0.
- --OPT1, --OPT2, --OPT3, three running time efficiency optimization strategies for struc2vec. The default values are True.
- --until-layer, calculation until the layer. A hyper-parameter for struc2vec. The default is 6.
- Neural Network-based methods:
- --lr, learning rate for gradient descent. The default is 0.01.
- --epochs, training epochs. The default is 5. Suggest to set a small value for LINE and SDNE (e.g., 5), and a large value for GAE (e.g., 500).
- --bs, batch size. Only applied for SDNE. The default is 200.
- --negative-ratio, the negative sampling ratio for LINE. The default is 5.
- --order, the order of LINE, 1 means first order, 2 means second order, 3 means first order + second order. The default is 2.
- --alpha, a hyperparameter in SDNE that balances the weight of 1st-order and 2nd-order proximities. The default is 0.3.
- --beta, a hyperparameter in SDNE that controls the reconstruction weight of the nonzero elements in the training graph. The default is 0.
- --dropout, dropout rate. Only applied for GAE. The default is 0.
- --hidden, number of units in hidden layer. Only applied for GAE. The default is 32.
- --gae_model_selection, GAE model variants: gcn_ae or gcn_vae. The default is gcn_ae.
#### Running example
```
bionev --input ./data/DrugBank_DDI/DrugBank_DDI.edgelist \
--output ./embeddings/DeepWalk_DrugBank_DDI.txt \
--method DeepWalk \
--task link-prediction \
--eval-result-file eval_result.txt
```
```
bionev --input ./data/Clin_Term_COOC/Clin_Term_COOC.edgelist \
--label-file ./data/Clin_Term_COOC/Clin_Term_COOC_labels.txt \
--output ./embeddings/LINE_COOC.txt \
--method LINE \
--task node-classification \
--weighted True
```
## 6. Contact
Feel free to contact [Xiang Yue](https://xiangyue9607.github.io/) <yue.149 AT osu DOT edu> or [Huan Sun](http://web.cse.ohio-state.edu/~sun.397/) <sun.397 AT osu DOT edu> for any questions about the paper, datasets, code and results.
| 10,080 | 57.953216 | 513 | md |
BioNEV | BioNEV-master/setup.py | # -*- coding: utf-8 -*-
"""Setup module."""
import setuptools
if __name__ == '__main__':
    # All packaging metadata is declared outside this file (setup.cfg /
    # pyproject); setup() just hands control to setuptools.
    setuptools.setup()
| 115 | 11.888889 | 26 | py |
BioNEV | BioNEV-master/src/bionev/__init__.py | # -*- coding: utf-8 -*-
| 24 | 11.5 | 23 | py |
BioNEV | BioNEV-master/src/bionev/__main__.py | # -*- coding: utf-8 -*-
"""Entrypoint module, in case you use ``python -m bionev``.
Why does this file exist, and why ``__main__``? For more info, read:
- https://www.python.org/dev/peps/pep-0338/
- https://docs.python.org/3/using/cmdline.html#cmdoption-m
"""
from .main import more_main
if __name__ == '__main__':
    # Delegate to the CLI entry point defined in bionev.main.
    more_main()
| 337 | 23.142857 | 68 | py |
BioNEV | BioNEV-master/src/bionev/embed_train.py | # -*- coding: utf-8 -*-
import ast
import logging
import os
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
from bionev.GAE.train_model import gae_model
from bionev.OpenNE import gf, grarep, hope, lap, line, node2vec, sdne
from bionev.SVD.model import SVD_embedding
from bionev.struc2vec import struc2vec
from bionev.utils import *
def embedding_training(args, train_graph_filename):
    """Load the training graph in the representation the chosen method needs,
    then dispatch to :func:`_embedding_training` to fit and save embeddings.
    """
    method = args.method
    if method == 'struc2vec':
        graph = read_for_struc2vec(train_graph_filename)
    elif method == 'GAE':
        graph = read_for_gae(train_graph_filename)
    elif method == 'SVD':
        graph = read_for_SVD(train_graph_filename, weighted=args.weighted)
    else:
        # All OpenNE-backed methods share the same graph loader.
        graph = read_for_OpenNE(train_graph_filename, weighted=args.weighted)
    _embedding_training(args, G_=graph)
def _embedding_training(args, G_=None):
    """Fit the embedding model selected by ``args.method`` on ``G_`` and save it.

    Args:
        args: parsed CLI namespace; method-specific hyper-parameters are read
            from it per branch below.
        G_: graph object whose concrete type depends on the method — a
            struc2vec graph, an ``(adj, node_list)`` pair for GAE, a networkx
            graph for SVD, or an OpenNE graph for the remaining methods.

    Side effects: writes the learned embeddings to ``args.output``; the
    struc2vec branch also creates and deletes a temporary ``random_walks.txt``.
    """
    seed=args.seed
    if args.method == 'struc2vec':
        logging.basicConfig(filename='./src/bionev/struc2vec/struc2vec.log', filemode='w', level=logging.DEBUG,
                            format='%(asctime)s %(message)s')
        # OPT3: limit structural-similarity computation to the first k layers.
        if (args.OPT3):
            until_layer = args.until_layer
        else:
            until_layer = None
        G = struc2vec.Graph(G_, args.workers, untilLayer=until_layer)
        # OPT1: use compact degree representation during BFS preprocessing.
        if (args.OPT1):
            G.preprocess_neighbors_with_bfs_compact()
        else:
            G.preprocess_neighbors_with_bfs()
        # OPT2: restrict distance calculation to similar-degree vertex pairs.
        if (args.OPT2):
            G.create_vectors()
            G.calc_distances(compactDegree=args.OPT1)
        else:
            G.calc_distances_all_vertices(compactDegree=args.OPT1)
        print('create distances network..')
        G.create_distances_network()
        print('begin random walk...')
        G.preprocess_parameters_random_walk()
        # Walks are streamed to random_walks.txt on disk by struc2vec.
        G.simulate_walks(args.number_walks, args.walk_length)
        print('walk finished..\nLearning embeddings...')
        walks = LineSentence('random_walks.txt')
        # skip-gram (sg=1) with hierarchical softmax (hs=1) over the walks.
        model = Word2Vec(walks, size=args.dimensions, window=args.window_size, min_count=0, hs=1, sg=1,
                         workers=args.workers, seed=seed)
        os.remove("random_walks.txt")
        model.wv.save_word2vec_format(args.output)
    elif args.method == 'GAE':
        model = gae_model(args)
        G = G_[0]           # sparse adjacency matrix
        node_list = G_[1]   # node ids in adjacency order
        model.train(G)
        # save embeddings
        model.save_embeddings(args.output, node_list)
    elif args.method == 'SVD':
        SVD_embedding(G_, args.output, size=args.dimensions)
    else:
        # OpenNE-backed methods: build the model (training happens in the
        # constructor for these implementations), then persist.
        if args.method == 'Laplacian':
            model = lap.LaplacianEigenmaps(G_, rep_size=args.dimensions)
        elif args.method == 'GF':
            model = gf.GraphFactorization(G_, rep_size=args.dimensions,
                                          epoch=args.epochs, learning_rate=args.lr, weight_decay=args.weight_decay)
        elif args.method == 'HOPE':
            model = hope.HOPE(graph=G_, d=args.dimensions)
        elif args.method == 'GraRep':
            model = grarep.GraRep(graph=G_, Kstep=args.kstep, dim=args.dimensions)
        elif args.method == 'DeepWalk':
            # DeepWalk is node2vec with uniform transition probabilities (dw=True).
            model = node2vec.Node2vec(graph=G_, path_length=args.walk_length,
                                      num_paths=args.number_walks, dim=args.dimensions,
                                      workers=args.workers, window=args.window_size, dw=True)
        elif args.method == 'node2vec':
            model = node2vec.Node2vec(graph=G_, path_length=args.walk_length,
                                      num_paths=args.number_walks, dim=args.dimensions,
                                      workers=args.workers, p=args.p, q=args.q, window=args.window_size)
        elif args.method == 'LINE':
            model = line.LINE(G_, epoch=args.epochs,
                              rep_size=args.dimensions, order=args.order)
        elif args.method == 'SDNE':
            # encoder_list is parsed from its string form, e.g. "[1000, 128]".
            encoder_layer_list = ast.literal_eval(args.encoder_list)
            model = sdne.SDNE(G_, encoder_layer_list=encoder_layer_list,
                              alpha=args.alpha, beta=args.beta, nu1=args.nu1, nu2=args.nu2,
                              batch_size=args.bs, epoch=args.epochs, learning_rate=args.lr)
        else:
            raise ValueError(f'Invalid method: {args.method}')
        print("Saving embeddings...")
        model.save_embeddings(args.output)
    return
| 4,371 | 36.367521 | 115 | py |
BioNEV | BioNEV-master/src/bionev/evaluation.py | # -*- coding: utf-8 -*-
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, average_precision_score, f1_score, roc_auc_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from bionev.utils import *
def _edge_features(embedding_look_up, edges, label):
    """Build one feature vector per edge plus a constant class label.

    Each feature is the concatenation [emb(u) ++ emb(v)] of the endpoint
    embeddings. Returns (features, labels) as parallel lists.
    """
    features = []
    labels = []
    for edge in edges:
        node_u_emb = embedding_look_up[edge[0]]
        node_v_emb = embedding_look_up[edge[1]]
        features.append(np.append(node_u_emb, node_v_emb))
        labels.append(label)
    return features, labels


def LinkPrediction(embedding_look_up, original_graph, train_graph, test_pos_edges, seed):
    """Evaluate embeddings on link prediction with a logistic-regression classifier.

    Positive examples are the training/held-out edges; negatives are sampled
    node pairs absent from the original graph (test negatives are additionally
    guaranteed disjoint from the training negatives via an auxiliary graph).

    Returns:
        (auc_roc, auc_pr, accuracy, f1) on the test split.
    """
    random.seed(seed)
    train_neg_edges = generate_neg_edges(original_graph, len(train_graph.edges()), seed)

    # create an auxiliary graph to ensure that testing negative edges will not be used in training
    G_aux = copy.deepcopy(original_graph)
    G_aux.add_edges_from(train_neg_edges)
    test_neg_edges = generate_neg_edges(G_aux, len(test_pos_edges), seed)

    # construct X_train, y_train, X_test, y_test (positives first, then negatives)
    X_train, y_train = _edge_features(embedding_look_up, train_graph.edges(), 1)
    neg_features, neg_labels = _edge_features(embedding_look_up, train_neg_edges, 0)
    X_train += neg_features
    y_train += neg_labels

    X_test, y_test = _edge_features(embedding_look_up, test_pos_edges, 1)
    neg_features, neg_labels = _edge_features(embedding_look_up, test_neg_edges, 0)
    X_test += neg_features
    y_test += neg_labels

    # shuffle for training and testing
    c = list(zip(X_train, y_train))
    random.shuffle(c)
    X_train, y_train = zip(*c)

    c = list(zip(X_test, y_test))
    random.shuffle(c)
    X_test, y_test = zip(*c)

    X_train = np.array(X_train)
    y_train = np.array(y_train)
    X_test = np.array(X_test)
    y_test = np.array(y_test)

    clf1 = LogisticRegression(random_state=seed, solver='lbfgs')
    clf1.fit(X_train, y_train)
    y_pred_proba = clf1.predict_proba(X_test)[:, 1]
    y_pred = clf1.predict(X_test)
    auc_roc = roc_auc_score(y_test, y_pred_proba)
    auc_pr = average_precision_score(y_test, y_pred_proba)
    accuracy = accuracy_score(y_test, y_pred)
    f1 = f1_score(y_test, y_pred)
    print('#' * 9 + ' Link Prediction Performance ' + '#' * 9)
    print(f'AUC-ROC: {auc_roc:.3f}, AUC-PR: {auc_pr:.3f}, Accuracy: {accuracy:.3f}, F1: {f1:.3f}')
    print('#' * 50)
    return auc_roc, auc_pr, accuracy, f1
def NodeClassification(embedding_look_up, node_list, labels, testing_ratio, seed):
    """Evaluate embeddings on multi-label node classification.

    Trains a one-vs-rest logistic regression on a seeded train/test split and
    returns (accuracy, micro_f1, macro_f1) on the test split.
    """
    X_train, y_train, X_test, y_test = split_train_test_classify(
        embedding_look_up, node_list, labels,
        testing_ratio=testing_ratio, seed=seed)

    # Binarize label lists over the union of train+test label sets.
    binarizer = MultiLabelBinarizer(sparse_output=True)
    binarizer.fit(np.append(y_train, y_test))
    y_train = binarizer.transform(y_train).todense()
    y_test = binarizer.transform(y_test).todense()

    classifier = OneVsRestClassifier(LogisticRegression(random_state=seed, solver='lbfgs'))
    classifier.fit(X_train, y_train)
    probabilities = classifier.predict_proba(X_test)

    # Small trick: assume the number of labels per node is known at test time.
    y_pred = get_y_pred(y_test, probabilities)

    accuracy = accuracy_score(y_test, y_pred)
    micro_f1 = f1_score(y_test, y_pred, average="micro")
    macro_f1 = f1_score(y_test, y_pred, average="macro")

    print('#' * 9 + ' Node Classification Performance ' + '#' * 9)
    print(f'Accuracy: {accuracy:.3f}, Micro-F1: {micro_f1:.3f}, Macro-F1: {macro_f1:.3f}')
    print('#' * 50)
    return accuracy, micro_f1, macro_f1
| 4,002 | 37.12381 | 103 | py |
BioNEV | BioNEV-master/src/bionev/main.py | # -*- coding: utf-8 -*-
import datetime
import getpass
import json
import os
import random
import time
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
import numpy as np
from bionev.embed_train import embedding_training, load_embedding, read_node_labels, split_train_test_graph
from bionev.evaluation import LinkPrediction, NodeClassification
def parse_args():
    """Build and evaluate the BioNEV command-line argument parser.

    Fixes two user-facing help-string defects: "alhpa" -> "alpha" for
    --alpha, and a missing space in the --testingratio help text
    ("tasks.In" -> "tasks. In"). All argument names, defaults, and choices
    are unchanged.

    Returns:
        argparse.Namespace with all CLI options.
    """
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,
                            conflict_handler='resolve')
    parser.add_argument('--input', required=True,
                        help='Input graph file. Only accepted edgelist format.')
    parser.add_argument('--output',
                        help='Output graph embedding file', required=True)
    parser.add_argument('--task', choices=[
        'none',
        'link-prediction',
        'node-classification'], default='none',
                        help='Choose to evaluate the embedding quality based on a specific prediction task. '
                             'None represents no evaluation, and only run for training embedding.')
    parser.add_argument('--testingratio', default=0.2, type=float,
                        help='Testing set ratio for prediction tasks. '
                             'In link prediction, it splits all the known edges; '
                             'in node classification, it splits all the labeled nodes.')
    parser.add_argument('--number-walks', default=32, type=int,
                        help='Number of random walks to start at each node. '
                             'Only for random walk-based methods: DeepWalk, node2vec, struc2vec')
    parser.add_argument('--walk-length', default=64, type=int,
                        help='Length of the random walk started at each node. '
                             'Only for random walk-based methods: DeepWalk, node2vec, struc2vec')
    parser.add_argument('--workers', default=8, type=int,
                        help='Number of parallel processes. '
                             'Only for random walk-based methods: DeepWalk, node2vec, struc2vec')
    parser.add_argument('--dimensions', default=100, type=int,
                        help='the dimensions of embedding for each node.')
    parser.add_argument('--window-size', default=10, type=int,
                        help='Window size of word2vec model. '
                             'Only for random walk-based methods: DeepWalk, node2vec, struc2vec')
    parser.add_argument('--epochs', default=5, type=int,
                        help='The training epochs of LINE, SDNE and GAE')
    parser.add_argument('--p', default=1.0, type=float,
                        help='p is a hyper-parameter for node2vec, '
                             'and it controls how fast the walk explores.')
    parser.add_argument('--q', default=1.0, type=float,
                        help='q is a hyper-parameter for node2vec, '
                             'and it controls how fast the walk leaves the neighborhood of starting node.')
    parser.add_argument('--method', required=True, choices=[
        'Laplacian',
        'GF',
        'SVD',
        'HOPE',
        'GraRep',
        'DeepWalk',
        'node2vec',
        'struc2vec',
        'LINE',
        'SDNE',
        'GAE'
    ], help='The embedding learning method')
    parser.add_argument('--label-file', default='',
                        help='The label file for node classification')
    parser.add_argument('--negative-ratio', default=5, type=int,
                        help='the negative ratio of LINE')
    parser.add_argument('--weighted', type=bool, default=False,
                        help='Treat graph as weighted')
    parser.add_argument('--directed', type=bool, default=False,
                        help='Treat graph as directed')
    parser.add_argument('--order', default=2, type=int,
                        help='Choose the order of LINE, 1 means first order, 2 means second order, 3 means first order + second order')
    parser.add_argument('--weight-decay', type=float, default=5e-4,
                        help='coefficient for L2 regularization for Graph Factorization.')
    parser.add_argument('--kstep', default=4, type=int,
                        help='Use k-step transition probability matrix for GraRep.')
    parser.add_argument('--lr', default=0.01, type=float,
                        help='learning rate')
    parser.add_argument('--alpha', default=0.3, type=float,
                        help='alpha is a hyperparameter in SDNE')
    parser.add_argument('--beta', default=0, type=float,
                        help='beta is a hyperparameter in SDNE')
    parser.add_argument('--nu1', default=1e-5, type=float,
                        help='nu1 is a hyperparameter in SDNE')
    parser.add_argument('--nu2', default=1e-4, type=float,
                        help='nu2 is a hyperparameter in SDNE')
    parser.add_argument('--bs', default=200, type=int,
                        help='batch size of SDNE')
    parser.add_argument('--encoder-list', default='[1000, 128]', type=str,
                        help='a list of numbers of the neuron at each encoder layer, the last number is the '
                             'dimension of the output node representation')
    parser.add_argument('--OPT1', default=True, type=bool,
                        help='optimization 1 for struc2vec')
    parser.add_argument('--OPT2', default=True, type=bool,
                        help='optimization 2 for struc2vec')
    parser.add_argument('--OPT3', default=True, type=bool,
                        help='optimization 3 for struc2vec')
    parser.add_argument('--until-layer', type=int, default=6,
                        help='Calculation until the layer. A hyper-parameter for struc2vec.')
    parser.add_argument('--dropout', default=0, type=float, help='Dropout rate (1 - keep probability).')
    parser.add_argument('--hidden', default=32, type=int, help='Number of units in hidden layer.')
    parser.add_argument('--gae_model_selection', default='gcn_ae', type=str,
                        help='gae model selection: gcn_ae or gcn_vae')
    parser.add_argument('--eval-result-file', help='save evaluation performance')
    parser.add_argument('--seed', default=0, type=int, help='seed value')

    args = parser.parse_args()
    return args
def main(args):
    """Train embeddings and optionally evaluate them on a prediction task.

    Bug fix: ``result`` was only assigned on the link-prediction and
    node-classification paths but was read afterwards whenever
    ``--eval-result-file`` was set, raising NameError when ``--task none``
    was combined with an eval-result file. It is now initialized to None.

    Args:
        args: parsed CLI namespace from :func:`parse_args`.
    """
    print('#' * 70)
    print('Embedding Method: %s, Evaluation Task: %s' % (args.method, args.task))
    print('#' * 70)
    result = None  # stays None when no evaluation task is requested
    if args.task == 'link-prediction':
        G, G_train, testing_pos_edges, train_graph_filename = split_train_test_graph(
            args.input, args.seed, weighted=args.weighted)
        time1 = time.time()
        embedding_training(args, train_graph_filename)
        embed_train_time = time.time() - time1
        print('Embedding Learning Time: %.2f s' % embed_train_time)
        embedding_look_up = load_embedding(args.output)
        time1 = time.time()
        print('Begin evaluation...')
        result = LinkPrediction(embedding_look_up, G, G_train, testing_pos_edges, args.seed)
        eval_time = time.time() - time1
        print('Prediction Task Time: %.2f s' % eval_time)
        # The temporary training edgelist is only needed during training.
        os.remove(train_graph_filename)
    elif args.task == 'node-classification':
        if not args.label_file:
            raise ValueError("No input label file. Exit.")
        node_list, labels = read_node_labels(args.label_file)
        train_graph_filename = args.input
        time1 = time.time()
        embedding_training(args, train_graph_filename)
        embed_train_time = time.time() - time1
        print('Embedding Learning Time: %.2f s' % embed_train_time)
        embedding_look_up = load_embedding(args.output, node_list)
        time1 = time.time()
        print('Begin evaluation...')
        result = NodeClassification(embedding_look_up, node_list, labels, args.testingratio, args.seed)
        eval_time = time.time() - time1
        print('Prediction Task Time: %.2f s' % eval_time)
    else:
        # Training-only mode: no evaluation, no result to persist.
        train_graph_filename = args.input
        time1 = time.time()
        embedding_training(args, train_graph_filename)
        embed_train_time = time.time() - time1
        print('Embedding Learning Time: %.2f s' % embed_train_time)

    if args.eval_result_file and result:
        # Append one JSON line per run so results accumulate across runs.
        _results = dict(
            input=args.input,
            task=args.task,
            method=args.method,
            dimension=args.dimensions,
            user=getpass.getuser(),
            date=datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S'),
            seed=args.seed,
        )
        if args.task == 'link-prediction':
            auc_roc, auc_pr, accuracy, f1 = result
            _results['results'] = dict(
                auc_roc=auc_roc,
                auc_pr=auc_pr,
                accuracy=accuracy,
                f1=f1,
            )
        else:
            accuracy, f1_micro, f1_macro = result
            _results['results'] = dict(
                accuracy=accuracy,
                f1_micro=f1_micro,
                f1_macro=f1_macro,
            )
        with open(args.eval_result_file, 'a+') as wf:
            print(json.dumps(_results, sort_keys=True), file=wf)
def more_main():
    """CLI entry point: seed the RNGs, then run :func:`main`.

    Bug fix: the original called ``main(parse_args())``, re-parsing
    ``sys.argv`` a second time instead of reusing the already-parsed
    namespace whose seed was just applied.
    """
    args = parse_args()
    # Seed both RNGs before any training/evaluation for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    main(args)
if __name__ == "__main__":
    # Allow running this module directly as a script.
    more_main()
| 9,368 | 46.318182 | 135 | py |
BioNEV | BioNEV-master/src/bionev/utils.py | # -*- coding: utf-8 -*-
import copy
import itertools
import random
import networkx as nx
import numpy as np
import bionev.OpenNE.graph as og
import bionev.struc2vec.graph as sg
def read_for_OpenNE(filename, weighted=False):
    """Load an edgelist file into an OpenNE graph object."""
    graph = og.Graph()
    print("Loading training graph for learning embedding...")
    graph.read_edgelist(filename=filename, weighted=weighted)
    print("Graph Loaded...")
    return graph
def read_for_struc2vec(filename):
    """Load an edgelist file as an undirected struc2vec graph."""
    print("Loading training graph for learning embedding...")
    graph = sg.load_edgelist(filename, undirected=True)
    print("Graph Loaded...")
    return graph
def read_for_gae(filename, weighted=False):
    """Load an edgelist into the (adjacency matrix, node list) pair GAE expects.

    When ``weighted``, rows with non-positive weight (third column) are dropped.
    """
    print("Loading training graph for learning embedding...")
    rows = np.loadtxt(filename, dtype='float')
    if weighted:
        pairs = [
            (int(rows[i, 0]), int(rows[i, 1]))
            for i in range(rows.shape[0])
            if rows[i, 2] > 0
        ]
    else:
        pairs = [(int(rows[i, 0]), int(rows[i, 1])) for i in range(rows.shape[0])]
    graph = nx.from_edgelist(pairs)
    node_list = list(graph.nodes)
    adj = nx.adjacency_matrix(graph, nodelist=node_list)
    print("Graph Loaded...")
    return (adj, node_list)
def read_for_SVD(filename, weighted=False):
    """Load an edgelist file as a networkx graph (weighted or unweighted)."""
    reader = nx.read_weighted_edgelist if weighted else nx.read_edgelist
    return reader(filename)
def split_train_test_graph(input_edgelist, seed, testing_ratio=0.2, weighted=False):
    """Split an edgelist graph into a training graph plus held-out positive edges.

    Args:
        input_edgelist: path to a (weighted) edgelist file readable by networkx.
        seed: RNG seed for sampling the held-out edges.
        testing_ratio: fraction of edges to hold out (default 0.2).
        weighted: whether the edgelist has a weight column.

    Returns:
        (G, G_train, testing_pos_edges, train_graph_filename): the full graph,
        the pruned training graph, the sampled held-out edges, and the path of
        the training edgelist written to disk ('graph_train.edgelist').
    """
    if (weighted):
        G = nx.read_weighted_edgelist(input_edgelist)
    else:
        G = nx.read_edgelist(input_edgelist)
    node_num1, edge_num1 = len(G.nodes), len(G.edges)
    print('Original Graph: nodes:', node_num1, 'edges:', edge_num1)
    testing_edges_num = int(len(G.edges) * testing_ratio)
    random.seed(seed)
    testing_pos_edges = random.sample(G.edges, testing_edges_num)
    G_train = copy.deepcopy(G)
    for edge in testing_pos_edges:
        node_u, node_v = edge
        # Only drop an edge if neither endpoint would lose its last edge, so
        # every original node keeps at least one training edge.
        # NOTE(review): edges skipped by this check remain both in G_train and
        # in testing_pos_edges — possible train/test leakage; confirm intended.
        if (G_train.degree(node_u) > 1 and G_train.degree(node_v) > 1):
            G_train.remove_edge(node_u, node_v)
    G_train.remove_nodes_from(nx.isolates(G_train))
    node_num2, edge_num2 = len(G_train.nodes), len(G_train.edges)
    # The degree guard above should have kept every node connected.
    assert node_num1 == node_num2
    train_graph_filename = 'graph_train.edgelist'
    if weighted:
        nx.write_edgelist(G_train, train_graph_filename, data=['weight'])
    else:
        nx.write_edgelist(G_train, train_graph_filename, data=False)
    node_num1, edge_num1 = len(G_train.nodes), len(G_train.edges)
    print('Training Graph: nodes:', node_num1, 'edges:', edge_num1)
    return G, G_train, testing_pos_edges, train_graph_filename
def generate_neg_edges(original_graph, testing_edges_num, seed):
    """Sample ``testing_edges_num`` node pairs that are NOT edges of the graph.

    Builds the complement of the graph (complete graph minus existing edges)
    and samples uniformly from it with the given seed.

    NOTE(review): materializing the complete graph is O(|V|^2) in memory and
    time — infeasible for very large graphs; rejection sampling would scale
    better but would change the seeded sample.
    """
    L = list(original_graph.nodes())
    # create a complete graph
    G = nx.Graph()
    G.add_nodes_from(L)
    G.add_edges_from(itertools.combinations(L, 2))
    # remove original edges
    G.remove_edges_from(original_graph.edges())
    random.seed(seed)
    neg_edges = random.sample(G.edges, testing_edges_num)
    return neg_edges
def load_embedding(embedding_file_name, node_list=None):
    """Load word2vec-format embeddings from a text file into a dict.

    The first line is "<node_num> <emb_size>"; each following line is
    "<node_id> <v1> ... <vd>". Every vector is L2-normalized; NaNs produced
    by zero-norm vectors are replaced with 0.

    Removed defects: a redundant ``f.close()`` inside the ``with`` block and
    a stretch of dead commented-out code.

    Args:
        embedding_file_name: path to the embedding file.
        node_list: optional list of node ids to keep. When given, only those
            nodes are loaded (as numpy arrays) and all of them must be found;
            otherwise every node is loaded (as a plain list of floats).

    Returns:
        dict mapping node id -> normalized embedding vector.
    """
    with open(embedding_file_name) as f:
        node_num, emb_size = f.readline().split()
        print('Nodes with embedding: %s' % node_num)
        embedding_look_up = {}
        if node_list:
            for line in f:
                vec = line.strip().split()
                node_id = vec[0]
                if node_id in node_list:
                    emb = [float(x) for x in vec[1:]]
                    emb = emb / np.linalg.norm(emb)
                    emb[np.isnan(emb)] = 0
                    embedding_look_up[node_id] = np.array(emb)
            # Every requested node must have been present in the file.
            assert len(node_list) == len(embedding_look_up)
        else:
            for line in f:
                vec = line.strip().split()
                node_id = vec[0]
                emb = [float(x) for x in vec[1:]]
                emb = emb / np.linalg.norm(emb)
                emb[np.isnan(emb)] = 0
                embedding_look_up[node_id] = list(emb)
            # The header count must match the number of rows read.
            assert int(node_num) == len(embedding_look_up)
    return embedding_look_up
def read_node_labels(filename):
    """Read a whitespace-separated node-label file.

    Each line is "<node_id> <label1> <label2> ...". Replaces the original
    C-style ``while 1: readline()`` loop (no context manager) with a ``with``
    block and direct line iteration; behavior is unchanged.

    Args:
        filename: path to the label file.

    Returns:
        (node_list, labels): parallel lists of node ids and their label lists.
    """
    node_list = []
    labels = []
    with open(filename, 'r') as fin:
        for line in fin:
            vec = line.strip().split()
            node_list.append(vec[0])
            labels.append(vec[1:])
    print('Nodes with labels: %s' % len(node_list))
    return node_list, labels
def split_train_test_classify(embedding_look_up, X, Y, seed, testing_ratio=0.2):
    """Seeded shuffle-split of labeled nodes into train/test feature arrays.

    The global numpy RNG state is saved and restored so the seeded
    permutation does not perturb the caller's random stream.
    """
    saved_state = np.random.get_state()
    n_train = int((1 - testing_ratio) * len(X))
    np.random.seed(seed)
    order = np.random.permutation(np.arange(len(X)))
    train_idx, test_idx = order[:n_train], order[n_train:]

    X_train = np.array([embedding_look_up[X[i]] for i in train_idx])
    Y_train = np.array([Y[i] for i in train_idx])
    X_test = np.array([embedding_look_up[X[i]] for i in test_idx])
    Y_test = np.array([Y[i] for i in test_idx])

    np.random.set_state(saved_state)
    return X_train, Y_train, X_test, Y_test
def get_y_pred(y_test, y_pred_prob):
    """Turn per-class probabilities into 0/1 predictions.

    For each row, the true number of labels (row sum of ``y_test``) is assumed
    known; that many highest-probability classes are set to 1.
    """
    y_pred = np.zeros(y_pred_prob.shape)
    # Descending ranking; flip(argsort) keeps the original tie-breaking order.
    ranked = np.flip(np.argsort(y_pred_prob, axis=1), 1)
    for row in range(y_test.shape[0]):
        k = np.sum(y_test[row])
        for col in ranked[row][:k]:
            y_pred[row][col] = 1
    return y_pred
| 6,094 | 33.050279 | 105 | py |
BioNEV | BioNEV-master/src/bionev/GAE/__init__.py | # -*- coding: utf-8 -*-
| 24 | 11.5 | 23 | py |
BioNEV | BioNEV-master/src/bionev/GAE/initialization.py | # -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
def weight_variable_glorot(input_dim, output_dim, name=""):
    """Create a weight variable with Glorot & Bengio (AISTATS 2010)
    initialization.

    Samples uniformly from [-r, r] with r = sqrt(6 / (fan_in + fan_out)),
    returning a trainable float32 tf.Variable of shape
    [input_dim, output_dim].
    """
    init_range = np.sqrt(6.0 / (input_dim + output_dim))
    initial = tf.random_uniform([input_dim, output_dim], minval=-init_range,
                                maxval=init_range, dtype=tf.float32)
    return tf.Variable(initial, name=name)
| 472 | 30.533333 | 76 | py |
BioNEV | BioNEV-master/src/bionev/GAE/layers.py | # -*- coding: utf-8 -*-
import tensorflow as tf
from bionev.GAE.initialization import *
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}


def get_layer_uid(layer_name=''):
    """Return the next unique 1-based integer ID for *layer_name*.

    Simplifies the original two-branch if/else into a single
    ``dict.get``-based increment; behavior is identical.
    """
    _LAYER_UIDS[layer_name] = _LAYER_UIDS.get(layer_name, 0) + 1
    return _LAYER_UIDS[layer_name]
def dropout_sparse(x, keep_prob, num_nonzero_elems):
    """Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)

    Keeps each of the ``num_nonzero_elems`` nonzero entries of ``x`` with
    probability ``keep_prob`` and rescales survivors by 1/keep_prob so the
    expected value is unchanged.
    """
    noise_shape = [num_nonzero_elems]
    random_tensor = keep_prob
    random_tensor += tf.random_uniform(noise_shape)
    # floor(keep_prob + U[0,1)) equals 1 with probability keep_prob, else 0.
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    pre_out = tf.sparse_retain(x, dropout_mask)
    return pre_out * (1. / keep_prob)
class Layer(object):
    """Base layer class. Defines basic API for all layer objects.

    # Properties
        name: String, defines the variable scope of the layer.
    # Methods
        _call(inputs): Defines computation graph of layer
            (i.e. takes input, returns output)
        __call__(inputs): Wrapper for _call()
    """

    def __init__(self, **kwargs):
        # Only 'name' and 'logging' are accepted as keyword arguments.
        allowed_kwargs = {'name', 'logging'}
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            # Default name: "<classname>_<uid>", e.g. "graphconvolution_1".
            layer = self.__class__.__name__.lower()
            name = layer + '_' + str(get_layer_uid(layer))
        self.name = name
        self.vars = {}  # trainable variables, populated by subclasses
        logging = kwargs.get('logging', False)
        self.logging = logging
        self.issparse = False  # subclasses set True when inputs are sparse

    def _call(self, inputs):
        # Identity by default; subclasses override with the real computation.
        return inputs

    def __call__(self, inputs):
        # Wrap _call in a TF name scope so ops are grouped under the layer name.
        with tf.name_scope(self.name):
            outputs = self._call(inputs)
            return outputs
class GraphConvolution(Layer):
    """Basic graph convolution layer for undirected graph without edge labels.

    Computes act(A @ (X @ W)) for dense inputs X, where A is the (sparse)
    adjacency tensor passed at construction.
    """

    def __init__(self, input_dim, output_dim, adj, dropout=0., act=tf.nn.relu, **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)
        with tf.variable_scope(self.name + '_vars'):
            # Glorot-initialized dense weight matrix of shape (input_dim, output_dim).
            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
        self.dropout = dropout
        # assumes adj is a pre-normalized sparse adjacency tensor — TODO confirm
        self.adj = adj
        self.act = act

    def _call(self, inputs):
        x = inputs
        x = tf.nn.dropout(x, 1 - self.dropout)
        x = tf.matmul(x, self.vars['weights'])          # X @ W
        x = tf.sparse_tensor_dense_matmul(self.adj, x)  # A @ (X @ W)
        outputs = self.act(x)
        return outputs
class GraphConvolutionSparse(Layer):
    """Graph convolution layer for sparse inputs.

    Same computation as GraphConvolution but the feature matrix X is a sparse
    tensor; ``features_nonzero`` is needed by the sparse dropout.
    """

    def __init__(self, input_dim, output_dim, adj, features_nonzero, dropout=0., act=tf.nn.relu, **kwargs):
        super(GraphConvolutionSparse, self).__init__(**kwargs)
        with tf.variable_scope(self.name + '_vars'):
            # Glorot-initialized dense weight matrix of shape (input_dim, output_dim).
            self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
        self.dropout = dropout
        # assumes adj is a pre-normalized sparse adjacency tensor — TODO confirm
        self.adj = adj
        self.act = act
        self.issparse = True
        # number of nonzero entries in the sparse input, required by dropout_sparse
        self.features_nonzero = features_nonzero

    def _call(self, inputs):
        x = inputs
        x = dropout_sparse(x, 1 - self.dropout, self.features_nonzero)
        x = tf.sparse_tensor_dense_matmul(x, self.vars['weights'])  # X @ W (sparse-dense)
        x = tf.sparse_tensor_dense_matmul(self.adj, x)              # A @ (X @ W)
        outputs = self.act(x)
        return outputs
class InnerProductDecoder(Layer):
    """Decoder model layer for link prediction."""

    def __init__(self, input_dim, dropout=0., act=tf.nn.sigmoid, **kwargs):
        super(InnerProductDecoder, self).__init__(**kwargs)
        self.dropout = dropout
        self.act = act

    def _call(self, inputs):
        # Edge score for (i, j) is the inner product <z_i, z_j>; the full
        # score matrix is flattened to one long vector.
        z = tf.nn.dropout(inputs, 1 - self.dropout)
        scores = tf.matmul(z, tf.transpose(z))
        return self.act(tf.reshape(scores, [-1]))
| 4,048 | 31.392 | 107 | py |
BioNEV | BioNEV-master/src/bionev/GAE/model.py | # -*- coding: utf-8 -*-
import tensorflow as tf
from bionev.GAE.layers import GraphConvolution, GraphConvolutionSparse, InnerProductDecoder
# TF1 global command-line flag registry; FLAGS exposes parsed flag values
# to the rest of the module.
flags = tf.app.flags
FLAGS = flags.FLAGS
class Model(object):
    """Base class for GAE models.

    Subclasses implement ``_build`` to assemble the TF graph; ``build``
    wraps it in a variable scope and collects the created variables into
    ``self.vars``.

    Keyword args:
        name: variable-scope name; defaults to the lowercased class name.
        logging: flag handed down to layers; defaults to False.
    """

    def __init__(self, **kwargs):
        allowed_kwargs = {'name', 'logging'}
        # Reject unknown keyword arguments early (the original ran this
        # identical validation loop twice; once is enough).
        for kwarg in kwargs.keys():
            assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
        name = kwargs.get('name')
        if not name:
            name = self.__class__.__name__.lower()
        self.name = name
        logging = kwargs.get('logging', False)
        self.logging = logging
        self.vars = {}

    def _build(self):
        # Subclasses must define the actual graph construction.
        raise NotImplementedError

    def build(self):
        """ Wrapper for _build() """
        with tf.variable_scope(self.name):
            self._build()
        # Collect every variable created under this model's scope.
        variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
        self.vars = {var.name: var for var in variables}

    def fit(self):
        pass

    def predict(self):
        pass
class GCNModelAE(Model):
    """Non-variational graph autoencoder (GAE).

    Encodes the sparse feature matrix through a sparse GCN layer and a
    dense GCN layer into ``z_mean``, then decodes pairwise inner products
    into flattened edge logits (``reconstructions``).
    """

    def __init__(self, placeholders, num_features, num_nodes, features_nonzero, hidden1, hidden2, **kwargs):
        super(GCNModelAE, self).__init__(**kwargs)
        self.inputs = placeholders['features']
        self.input_dim = num_features
        self.features_nonzero = features_nonzero
        self.adj = placeholders['adj']
        self.dropout = placeholders['dropout']
        self.hidden_dim_1 = hidden1
        self.hidden_dim_2 = hidden2
        self.build()

    def _build(self):
        # First layer consumes the sparse feature tensor directly.
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                              output_dim=self.hidden_dim_1,
                                              adj=self.adj,
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging)(self.inputs)
        # Linear (identity-activated) output layer produces the embeddings.
        self.embeddings = GraphConvolution(input_dim=self.hidden_dim_1,
                                           output_dim=self.hidden_dim_2,
                                           adj=self.adj,
                                           act=lambda x: x,
                                           dropout=self.dropout,
                                           logging=self.logging)(self.hidden1)
        self.z_mean = self.embeddings
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden_dim_2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.embeddings)
class GCNModelVAE(Model):
    """Variational graph autoencoder (VGAE).

    Two GCN output heads produce the Gaussian posterior mean (``z_mean``)
    and log standard deviation (``z_log_std``); a reparameterized sample
    ``z`` is decoded by inner products into edge logits
    (``reconstructions``).
    """

    def __init__(self, placeholders, num_features, num_nodes, features_nonzero, hidden1, hidden2, **kwargs):
        super(GCNModelVAE, self).__init__(**kwargs)
        self.inputs = placeholders['features']
        self.input_dim = num_features
        self.features_nonzero = features_nonzero
        self.n_samples = num_nodes
        self.adj = placeholders['adj']
        self.dropout = placeholders['dropout']
        self.hidden_dim_1 = hidden1
        self.hidden_dim_2 = hidden2
        self.build()

    def _build(self):
        # Shared sparse first layer.
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim,
                                              output_dim=self.hidden_dim_1,
                                              adj=self.adj,
                                              features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu,
                                              dropout=self.dropout,
                                              logging=self.logging)(self.inputs)
        self.z_mean = GraphConvolution(input_dim=self.hidden_dim_1,
                                       output_dim=self.hidden_dim_2,
                                       adj=self.adj,
                                       act=lambda x: x,
                                       dropout=self.dropout,
                                       logging=self.logging)(self.hidden1)
        self.z_log_std = GraphConvolution(input_dim=self.hidden_dim_1,
                                          output_dim=self.hidden_dim_2,
                                          adj=self.adj,
                                          act=lambda x: x,
                                          dropout=self.dropout,
                                          logging=self.logging)(self.hidden1)
        # Reparameterization trick: z = mu + eps * sigma, eps ~ N(0, I).
        self.z = self.z_mean + tf.random_normal([self.n_samples, self.hidden_dim_2]) * tf.exp(self.z_log_std)
        self.reconstructions = InnerProductDecoder(input_dim=self.hidden_dim_2,
                                                   act=lambda x: x,
                                                   logging=self.logging)(self.z)
| 5,047 | 39.709677 | 109 | py |
BioNEV | BioNEV-master/src/bionev/GAE/optimizer.py | # -*- coding: utf-8 -*-
import tensorflow as tf
class OptimizerAE(object):
    """Training ops for the non-variational GAE.

    ``preds`` are flattened edge logits, ``labels`` the flattened dense
    adjacency. ``pos_weight`` compensates for edge sparsity in the
    weighted cross-entropy; ``norm`` rescales the mean loss.
    """

    def __init__(self, preds, labels, pos_weight, norm, learning_rate):
        preds_sub = preds
        labels_sub = labels
        self.cost = norm * tf.reduce_mean(
            tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, targets=labels_sub, pos_weight=pos_weight))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)  # Adam Optimizer
        self.opt_op = self.optimizer.minimize(self.cost)
        self.grads_vars = self.optimizer.compute_gradients(self.cost)
        # An edge is predicted present iff sigmoid(logit) >= 0.5.
        self.correct_prediction = tf.equal(tf.cast(tf.greater_equal(tf.sigmoid(preds_sub), 0.5), tf.int32),
                                           tf.cast(labels_sub, tf.int32))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
class OptimizerVAE(object):
    """Training ops for the variational GAE: reconstruction loss plus KL term."""

    def __init__(self, preds, labels, model, num_nodes, pos_weight, norm, learning_rate):
        preds_sub = preds
        labels_sub = labels
        self.cost = norm * tf.reduce_mean(
            tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, targets=labels_sub, pos_weight=pos_weight))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)  # Adam Optimizer
        # Latent loss
        self.log_lik = self.cost
        # `kl` as written is the *negative* KL divergence of q(z|X,A) from
        # N(0, I), averaged over nodes, so subtracting it below adds the KL
        # penalty to the loss.
        self.kl = (0.5 / num_nodes) * tf.reduce_mean(tf.reduce_sum(1 + 2 * model.z_log_std - tf.square(model.z_mean) -
                                                                   tf.square(tf.exp(model.z_log_std)), 1))
        self.cost -= self.kl
        self.opt_op = self.optimizer.minimize(self.cost)
        self.grads_vars = self.optimizer.compute_gradients(self.cost)
        # An edge is predicted present iff sigmoid(logit) >= 0.5.
        self.correct_prediction = tf.equal(tf.cast(tf.greater_equal(tf.sigmoid(preds_sub), 0.5), tf.int32),
                                           tf.cast(labels_sub, tf.int32))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
| 1,989 | 44.227273 | 118 | py |
BioNEV | BioNEV-master/src/bionev/GAE/preprocessing.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.sparse as sp
def sparse_to_tuple(sparse_mx):
    """Decompose a scipy sparse matrix into a (coords, values, shape) triple.

    ``coords`` is an (nnz, 2) array of [row, col] indices, ``values`` the
    matching data array, ``shape`` the dense shape — the representation TF
    sparse placeholders expect.
    """
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    coords = np.column_stack((coo.row, coo.col))
    return coords, coo.data, coo.shape
def preprocess_graph(adj):
    """Symmetrically normalize ``adj`` with self-loops added.

    Computes D^{-1/2} (A + I)^T D^{-1/2} where D is the row-degree of
    A + I, and returns it in the (coords, values, shape) tuple format.
    """
    adj_coo = sp.coo_matrix(adj)
    adj_with_loops = adj_coo + sp.eye(adj_coo.shape[0])
    degrees = np.array(adj_with_loops.sum(1))
    d_inv_sqrt = sp.diags(np.power(degrees, -0.5).flatten())
    normalized = adj_with_loops.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocoo()
    return sparse_to_tuple(normalized)
def construct_feed_dict(adj_normalized, adj, features, placeholders):
    """Map the graph tensors onto their placeholders for a session run."""
    return {
        placeholders['features']: features,
        placeholders['adj']: adj_normalized,
        placeholders['adj_orig']: adj,
    }
def mask_test_edges(adj):
    """Split the edges of ``adj`` into train/validation/test sets.

    Holds out 10% of edges for testing and 5% for validation, samples an
    equal number of random non-edges as negatives for each split, and
    rebuilds a symmetric training adjacency matrix from the remaining
    edges.

    :param adj: scipy sparse adjacency matrix (assumed symmetric).
    :return: (adj_train, train_edges, val_edges, val_edges_false,
              test_edges, test_edges_false)
    """
    # Function to build test set with 10% positive links
    # NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper.
    # Remove diagonal elements
    adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
    adj.eliminate_zeros()
    # Check that diag is zero:
    assert np.diag(adj.todense()).sum() == 0
    # Work on the upper triangle so each undirected edge appears exactly once.
    adj_triu = sp.triu(adj)
    adj_tuple = sparse_to_tuple(adj_triu)
    edges = adj_tuple[0]
    edges_all = sparse_to_tuple(adj)[0]
    num_test = int(np.floor(edges.shape[0] / 10.))
    num_val = int(np.floor(edges.shape[0] / 20.))
    all_edge_idx = list(range(edges.shape[0]))
    np.random.shuffle(all_edge_idx)
    val_edge_idx = all_edge_idx[:num_val]
    test_edge_idx = all_edge_idx[num_val:(num_val + num_test)]
    test_edges = edges[test_edge_idx]
    val_edges = edges[val_edge_idx]
    train_edges = np.delete(edges, np.hstack([test_edge_idx, val_edge_idx]), axis=0)

    def ismember(a, b, tol=5):
        # True iff row `a` occurs in array `b` (elementwise match after rounding).
        rows_close = np.all(np.round(a - b[:, None], tol) == 0, axis=-1)
        return np.any(rows_close)

    # Rejection-sample negative test edges: random ordered pairs that are
    # not self-loops, not real edges, and not already sampled.
    test_edges_false = []
    while len(test_edges_false) < len(test_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], edges_all):
            continue
        if test_edges_false:
            if ismember([idx_j, idx_i], np.array(test_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(test_edges_false)):
                continue
        test_edges_false.append([idx_i, idx_j])
    # Same sampling for validation negatives, additionally excluding
    # train/validation positives in either direction.
    val_edges_false = []
    while len(val_edges_false) < len(val_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        if ismember([idx_i, idx_j], train_edges):
            continue
        if ismember([idx_j, idx_i], train_edges):
            continue
        if ismember([idx_i, idx_j], val_edges):
            continue
        if ismember([idx_j, idx_i], val_edges):
            continue
        if val_edges_false:
            if ismember([idx_j, idx_i], np.array(val_edges_false)):
                continue
            if ismember([idx_i, idx_j], np.array(val_edges_false)):
                continue
        val_edges_false.append([idx_i, idx_j])
    # Sanity checks: the splits are disjoint and negatives are true non-edges.
    assert ~ismember(test_edges_false, edges_all)
    assert ~ismember(val_edges_false, edges_all)
    assert ~ismember(val_edges, train_edges)
    assert ~ismember(test_edges, train_edges)
    assert ~ismember(val_edges, test_edges)
    data = np.ones(train_edges.shape[0])
    # Re-build adj matrix
    adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])), shape=adj.shape)
    adj_train = adj_train + adj_train.T
    # NOTE: these edge lists only contain single direction of edge!
    return adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false
| 4,060 | 34.938053 | 104 | py |
BioNEV | BioNEV-master/src/bionev/GAE/train_model.py | # -*- coding: utf-8 -*-
import time
import numpy as np
import scipy.sparse as sp
import tensorflow as tf
from bionev.GAE.model import GCNModelAE, GCNModelVAE
from bionev.GAE.optimizer import OptimizerAE, OptimizerVAE
from bionev.GAE.preprocessing import construct_feed_dict, preprocess_graph, sparse_to_tuple
# # Train on CPU (hide GPU) due to memory constraints
# os.environ['CUDA_VISIBLE_DEVICES'] = ""
class gae_model(object):
    """Trains a (variational) graph autoencoder and exports node embeddings.

    Hyperparameters come from ``args``: lr, epochs, hidden, dimensions,
    weight_decay, dropout, and gae_model_selection ('gcn_ae' or 'gcn_vae').
    ``train`` builds the TF graph and runs the optimization; afterwards
    ``save_embeddings`` writes ``z_mean`` to disk.
    """

    def __init__(self, args):
        super(gae_model, self).__init__()
        self.learning_rate = args.lr
        self.epochs = args.epochs
        self.hidden1 = args.hidden
        self.hidden2 = args.dimensions
        self.weight_decay = args.weight_decay
        self.dropout = args.dropout
        self.model_selection = args.gae_model_selection
        self.model = None

    def save_embeddings(self, output, node_list):
        """Run the encoder (dropout off) and write z_mean rows to ``output``.

        Output is word2vec text format: a "<count> <dim>" header, then one
        "<node_id> <v1> ... <vdim>" line per node, ordered by ``node_list``.
        Must be called after ``train`` (uses its session and feed dict).
        """
        self.feed_dict.update({self.placeholders['dropout']: 0})
        emb = self.sess.run(self.model.z_mean, feed_dict=self.feed_dict)
        print(emb.shape)
        fout = open(output, 'w')
        fout.write("{} {}\n".format(emb.shape[0], emb.shape[1]))
        for idx in range(emb.shape[0]):
            fout.write("{} {}\n".format(node_list[idx], ' '.join([str(x) for x in emb[idx, :]])))
        fout.close()

    def train(self, adj):
        """Build and optimize the selected GAE/VGAE model on adjacency ``adj``."""
        # Store original adjacency matrix (without diagonal entries) for later
        adj_orig = adj
        adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
        adj_orig.eliminate_zeros()
        adj_train = adj
        # Identity features: each node is a one-hot vector.
        features = sp.identity(adj.shape[0])  # featureless
        # Some preprocessing
        adj_norm = preprocess_graph(adj)
        # Define placeholders
        self.placeholders = {
            'features': tf.sparse_placeholder(tf.float32),
            'adj': tf.sparse_placeholder(tf.float32),
            'adj_orig': tf.sparse_placeholder(tf.float32),
            'dropout': tf.placeholder_with_default(0., shape=())
        }
        num_nodes = adj.shape[0]
        features = sparse_to_tuple(features.tocoo())
        num_features = features[2][1]
        features_nonzero = features[1].shape[0]
        # Create model
        if self.model_selection == 'gcn_ae':
            self.model = GCNModelAE(self.placeholders, num_features, features_nonzero, self.hidden1, self.hidden2)
        elif self.model_selection == 'gcn_vae':
            self.model = GCNModelVAE(self.placeholders, num_features, num_nodes, features_nonzero, self.hidden1,
                                     self.hidden2)
        # Class-imbalance weight for positive edges and loss normalizer.
        pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()
        norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
        # Optimizer
        with tf.name_scope('optimizer'):
            if self.model_selection == 'gcn_ae':
                opt = OptimizerAE(preds=self.model.reconstructions,
                                  labels=tf.reshape(tf.sparse_tensor_to_dense(self.placeholders['adj_orig'],
                                                                              validate_indices=False), [-1]),
                                  pos_weight=pos_weight,
                                  norm=norm,
                                  learning_rate=self.learning_rate
                                  )
            elif self.model_selection == 'gcn_vae':
                opt = OptimizerVAE(preds=self.model.reconstructions,
                                   labels=tf.reshape(tf.sparse_tensor_to_dense(self.placeholders['adj_orig'],
                                                                               validate_indices=False), [-1]),
                                   model=self.model,
                                   num_nodes=num_nodes,
                                   pos_weight=pos_weight,
                                   norm=norm,
                                   learning_rate=self.learning_rate
                                   )
        # Initialize session
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        # Labels are the adjacency plus self-loops.
        adj_label = adj_train + sp.eye(adj_train.shape[0])
        adj_label = sparse_to_tuple(adj_label)
        # Train model
        for epoch in range(self.epochs):
            t = time.time()
            # Construct feed dictionary
            self.feed_dict = construct_feed_dict(adj_norm, adj_label, features, self.placeholders)
            self.feed_dict.update({self.placeholders['dropout']: self.dropout})
            # Run single weight update
            outs = self.sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=self.feed_dict)
            # Compute average loss
            avg_cost = outs[1]
            avg_accuracy = outs[2]
            print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(avg_cost),
                  "train_acc=", "{:.5f}".format(avg_accuracy),
                  "time=", "{:.5f}".format(time.time() - t))
        print("Optimization Finished!")
| 5,069 | 41.605042 | 114 | py |
BioNEV | BioNEV-master/src/bionev/OpenNE/__init__.py | # -*- coding: utf-8 -*-
| 24 | 11.5 | 23 | py |
BioNEV | BioNEV-master/src/bionev/OpenNE/classify.py | # -*- coding: utf-8 -*-
import numpy
from sklearn.metrics import f1_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
class TopKRanker(OneVsRestClassifier):
    """One-vs-rest classifier that predicts a fixed number of labels per sample."""

    def predict(self, X, top_k_list):
        """For sample i, mark its ``top_k_list[i]`` highest-probability labels.

        Returns a binary indicator matrix of shape (n_samples, n_classes).
        """
        probs = numpy.asarray(super(TopKRanker, self).predict_proba(X))
        all_labels = []
        for i, k in enumerate(top_k_list):
            probs_ = probs[i, :]
            # Class values of the k largest probabilities for this sample.
            # NOTE(review): the class values are then used as column indices,
            # which assumes labels are the integers 0..n_classes-1 — confirm.
            labels = self.classes_[probs_.argsort()[-k:]].tolist()
            # Reuse the probability row as the indicator vector: zero it in
            # place, then flag the selected labels.
            probs_[:] = 0
            probs_[labels] = 1
            all_labels.append(probs_)
        return numpy.asarray(all_labels)
class Classifier(object):
    """Multi-label node classification over precomputed embeddings.

    ``vectors`` maps node id -> embedding vector; ``clf`` is a scikit-learn
    estimator wrapped in TopKRanker so each node is assigned exactly as
    many labels as it truly has.
    """

    def __init__(self, vectors, clf):
        self.embeddings = vectors
        self.clf = TopKRanker(clf)
        self.binarizer = MultiLabelBinarizer(sparse_output=True)

    def train(self, X, Y, Y_all):
        """Fit on nodes X with label lists Y; Y_all fixes the full label universe."""
        self.binarizer.fit(Y_all)
        X_train = [self.embeddings[x] for x in X]
        Y = self.binarizer.transform(Y)
        self.clf.fit(X_train, Y)

    def evaluate(self, X, Y):
        """Return micro/macro/samples/weighted F1 on nodes X with true labels Y."""
        # Standard protocol: tell the ranker the true label count per node.
        top_k_list = [len(l) for l in Y]
        Y_ = self.predict(X, top_k_list)
        Y = self.binarizer.transform(Y)
        averages = ["micro", "macro", "samples", "weighted"]
        results = {}
        for average in averages:
            results[average] = f1_score(Y, Y_, average=average)
        # print('Results, using embeddings of dimensionality', len(self.embeddings[X[0]]))
        # print('-------------------')
        print(results)
        return results
        # print('-------------------')

    def predict(self, X, top_k_list):
        """Return binary indicator predictions for nodes X."""
        X_ = numpy.asarray([self.embeddings[x] for x in X])
        Y = self.clf.predict(X_, top_k_list=top_k_list)
        return Y

    def split_train_evaluate(self, X, Y, train_precent, seed=0):
        """Shuffle-split (``train_precent`` fraction trains), fit, then evaluate.

        Saves and restores the global numpy RNG state so the seeded split
        does not perturb the caller's randomness. (Parameter name typo is
        preserved for API compatibility.)
        """
        state = numpy.random.get_state()
        training_size = int(train_precent * len(X))
        numpy.random.seed(seed)
        shuffle_indices = numpy.random.permutation(numpy.arange(len(X)))
        X_train = [X[shuffle_indices[i]] for i in range(training_size)]
        Y_train = [Y[shuffle_indices[i]] for i in range(training_size)]
        X_test = [X[shuffle_indices[i]] for i in range(training_size, len(X))]
        Y_test = [Y[shuffle_indices[i]] for i in range(training_size, len(X))]
        self.train(X_train, Y_train, Y)
        numpy.random.set_state(state)
        return self.evaluate(X_test, Y_test)
def load_embeddings(filename):
    """Load embeddings from a word2vec-format text file.

    The first line is "<num_nodes> <dim>"; every following line is
    "<node_id> <v1> ... <v_dim>". Returns a dict mapping node id (str)
    to a list of floats, and asserts the header counts match the content.
    """
    with open(filename, 'r') as fin:
        node_num, size = (int(x) for x in fin.readline().strip().split())
        vectors = {}
        for line in fin:
            parts = line.strip().split(' ')
            assert len(parts) == size + 1
            vectors[parts[0]] = [float(x) for x in parts[1:]]
    assert len(vectors) == node_num
    return vectors
def read_node_label(filename):
    """Read node labels from a file of "<node> <label1> <label2> ..." lines.

    Returns (X, Y) where X is the list of node ids and Y the parallel list
    of label lists.
    """
    X, Y = [], []
    with open(filename, 'r') as fin:
        for line in fin:
            parts = line.strip().split(' ')
            X.append(parts[0])
            Y.append(parts[1:])
    return X, Y
| 3,142 | 30.747475 | 90 | py |
BioNEV | BioNEV-master/src/bionev/OpenNE/gf.py | # -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
__author__ = "Wang Binlu"
__email__ = "wblmail@whu.edu.cn"
class GraphFactorization(object):
    """Graph Factorization embeddings.

    Learns U minimizing ||(A - U U^T) * mask||^2 + lamb * ||U||^2 with
    Adam, where the mask restricts the reconstruction loss to observed
    (non-zero) edges. Results land in ``self.vectors``
    (node id -> embedding).
    """

    def __init__(self, graph, rep_size=128, epoch=120, learning_rate=0.003, weight_decay=1.):
        self.g = graph
        self.node_size = graph.G.number_of_nodes()
        self.rep_size = rep_size
        self.max_iter = epoch
        self.lr = learning_rate
        self.lamb = weight_decay
        self.sess = tf.Session()
        self.adj_mat = self.getAdj()
        self.vectors = {}
        self.embeddings = self.get_train()
        # Map embedding rows back to original node ids.
        look_back = self.g.look_back_list
        for i, embedding in enumerate(self.embeddings):
            self.vectors[look_back[i]] = embedding

    def getAdj(self):
        """Dense weighted adjacency matrix in look_up index order."""
        node_size = self.g.node_size
        look_up = self.g.look_up_dict
        adj = np.zeros((node_size, node_size))
        for edge in self.g.G.edges():
            adj[look_up[edge[0]]][look_up[edge[1]]] = self.g.G[edge[0]][edge[1]]['weight']
        return adj

    def get_train(self):
        """Build the factorization graph, run Adam, return the embedding matrix."""
        adj_mat = self.adj_mat
        # Only observed edges contribute to the reconstruction loss.
        mat_mask = 1. * (adj_mat > 0)
        _embeddings = tf.Variable(tf.contrib.layers.xavier_initializer()([self.node_size, self.rep_size]),
                                  dtype=tf.float32, name='embeddings')
        Adj = tf.placeholder(tf.float32, [self.node_size, self.node_size], name='adj_mat')
        AdjMask = tf.placeholder(tf.float32, [self.node_size, self.node_size], name='adj_mask')
        # Masked squared reconstruction error plus L2 regularization.
        cost = tf.reduce_sum(
            tf.square(Adj - tf.matmul(_embeddings, tf.transpose(_embeddings)) * AdjMask)) + \
               self.lamb * tf.reduce_sum(tf.square(_embeddings))
        optimizer = tf.train.AdamOptimizer(self.lr)
        train_op = optimizer.minimize(cost)
        init = tf.global_variables_initializer()
        self.sess.run(init)
        print("total iter: %i" % self.max_iter)
        for step in range(self.max_iter):
            self.sess.run(train_op, feed_dict={Adj: adj_mat, AdjMask: mat_mask})
            if step % 50 == 0:
                print("step %i: cost: %g" % (step, self.sess.run(cost, feed_dict={Adj: adj_mat, AdjMask: mat_mask})))
        return self.sess.run(_embeddings)

    def save_embeddings(self, filename):
        """Write embeddings in word2vec text format: header, then one node per line."""
        fout = open(filename, 'w')
        node_num = len(self.vectors)
        fout.write("{} {}\n".format(node_num, self.rep_size))
        for node, vec in self.vectors.items():
            fout.write("{} {}\n".format(node, ' '.join([str(x) for x in vec])))
        fout.close()
| 2,561 | 33.621622 | 117 | py |
BioNEV | BioNEV-master/src/bionev/OpenNE/graph.py | # -*- coding: utf-8 -*-
"""Graph utilities."""
import networkx as nx
import numpy as np
__author__ = "Zhang Zhengyan"
__email__ = "zhangzhengyan14@mails.tsinghua.edu.cn"
class Graph(object):
    """In-memory graph with a stable node indexing.

    The underlying graph ``G`` is a networkx DiGraph (undirected inputs
    are stored as two directed arcs). ``look_up_dict`` maps node id ->
    integer index and ``look_back_list`` maps index -> node id; embedding
    methods rely on this ordering.
    """

    def __init__(self):
        self.G = None
        self.look_up_dict = {}
        self.look_back_list = []
        self.node_size = 0

    def encode_node(self):
        """Assign each node a dense integer index and an empty 'status' attribute."""
        look_up = self.look_up_dict
        look_back = self.look_back_list
        for node in self.G.nodes():
            look_up[node] = self.node_size
            look_back.append(node)
            self.node_size += 1
            self.G.nodes[node]['status'] = ''

    def read_g(self, g):
        """Adopt an existing networkx graph and index its nodes."""
        self.G = g
        self.encode_node()

    def read_adjlist(self, filename):
        """ Read graph from adjacency file in which the edge must be unweighted
            the format of each line: v1 n1 n2 n3 ... nk
            :param filename: the filename of input file
        """
        self.G = nx.read_adjlist(filename, create_using=nx.DiGraph())
        # Adjacency lists carry no weights; default every edge to 1.0.
        for i, j in self.G.edges():
            self.G[i][j]['weight'] = 1.0
        self.encode_node()

    def read_edgelist(self, filename, weighted=False, directed=False):
        """Read an edge list ("src dst" or "src dst weight" per line).

        Undirected input adds both arc directions; unweighted input gets
        weight 1.0 on every edge.
        """
        self.G = nx.DiGraph()

        if directed:
            def read_unweighted(l):
                src, dst = l.split()
                self.G.add_edge(src, dst)
                self.G[src][dst]['weight'] = 1.0

            def read_weighted(l):
                src, dst, w = l.split()
                self.G.add_edge(src, dst)
                self.G[src][dst]['weight'] = float(w)
        else:
            def read_unweighted(l):
                src, dst = l.split()
                self.G.add_edge(src, dst)
                self.G.add_edge(dst, src)
                self.G[src][dst]['weight'] = 1.0
                self.G[dst][src]['weight'] = 1.0

            def read_weighted(l):
                src, dst, w = l.split()
                # print(src, dst, float(w))
                self.G.add_edge(src, dst)
                self.G.add_edge(dst, src)
                self.G[src][dst]['weight'] = float(w)
                self.G[dst][src]['weight'] = float(w)
        fin = open(filename, 'r')
        func = read_unweighted
        if weighted:
            func = read_weighted
        while 1:
            l = fin.readline()
            if l == '':
                break
            func(l)
        fin.close()
        self.encode_node()

    def read_node_label(self, filename):
        """Attach a 'label' list attribute to each node ("node l1 l2 ..." lines)."""
        fin = open(filename, 'r')
        while 1:
            l = fin.readline()
            if l == '':
                break
            vec = l.split()
            self.G.nodes[vec[0]]['label'] = vec[1:]
        fin.close()

    def read_node_features(self, filename):
        """Attach a numeric 'feature' array to each node ("node f1 f2 ..." lines)."""
        fin = open(filename, 'r')
        for l in fin.readlines():
            vec = l.split()
            self.G.nodes[vec[0]]['feature'] = np.array(
                [float(x) for x in vec[1:]])
        fin.close()

    def read_node_status(self, filename):
        """Attach a 'status' string to each node ("node status" lines)."""
        fin = open(filename, 'r')
        while 1:
            l = fin.readline()
            if l == '':
                break
            vec = l.split()
            self.G.nodes[vec[0]]['status'] = vec[1]  # train test valid
        fin.close()

    def read_edge_label(self, filename):
        """Attach a 'label' list to each edge ("src dst l1 l2 ..." lines)."""
        fin = open(filename, 'r')
        while 1:
            l = fin.readline()
            if l == '':
                break
            vec = l.split()
            self.G[vec[0]][vec[1]]['label'] = vec[2:]
        fin.close()
| 3,533 | 28.45 | 79 | py |
BioNEV | BioNEV-master/src/bionev/OpenNE/grarep.py | # -*- coding: utf-8 -*-
import numpy as np
from scipy.sparse.linalg import svds
from sklearn.preprocessing import normalize
class GraRep(object):
    """GraRep embeddings (Cao et al., CIKM 2015).

    For k = 1..Kstep, factorizes the positive part of the log transition
    matrix of A^k with a truncated SVD and concatenates the per-step
    representations. ``dim`` must be divisible by ``Kstep``; each step
    contributes dim/Kstep dimensions. Results land in ``self.vectors``
    (node id -> embedding).
    """

    def __init__(self, graph, Kstep, dim):
        self.g = graph
        self.Kstep = Kstep
        assert dim % Kstep == 0
        self.dim = int(dim / Kstep)
        self.train()

    def getAdjMat(self):
        """Dense symmetric 0/1 adjacency matrix in look_up index order."""
        graph = self.g.G
        node_size = self.g.node_size
        look_up = self.g.look_up_dict
        adj = np.zeros((node_size, node_size))
        for edge in self.g.G.edges():
            adj[look_up[edge[0]]][look_up[edge[1]]] = 1.0
            adj[look_up[edge[1]]][look_up[edge[0]]] = 1.0
        # ScaleSimMat
        return np.matrix(adj)

    def GetProbTranMat(self, Ak):
        """Positive part of log(p_k(j|i) * N) for the k-step matrix ``Ak``.

        Columns whose sum is zero produce NaN via 0/0; those entries (and
        all negative log values) are clamped to 0.
        """
        tileMat = np.tile(np.sum(Ak, axis=0), (self.node_size, 1))
        # 0/0 and log(0) are expected for isolated columns; silence the
        # warnings and clean the results up below.
        with np.errstate(divide='ignore', invalid='ignore'):
            probTranMat = np.log(Ak / tileMat) - np.log(1.0 / self.node_size)
        # BUG FIX: the original tested `probTranMat == np.nan`, which is
        # always False (NaN never compares equal), so NaNs leaked through.
        probTranMat[np.isnan(probTranMat)] = 0
        # -inf from log(0) is negative, so this clamp also removes it.
        probTranMat[probTranMat < 0] = 0
        return probTranMat

    def GetRepUseSVD(self, probTranMat, alpha):
        """Truncated SVD factorization; returns the W + C embedding matrix."""
        U, Sigma, VT = svds(probTranMat, self.dim)
        Sigma = np.diag(Sigma)
        # Split Sigma^alpha between the source (W) and context (C) factors.
        W = np.matmul(U, np.power(Sigma, alpha))
        C = np.matmul(VT.T, np.power(Sigma, alpha))
        # Source and context representations are summed into one embedding.
        return W + C

    def save_embeddings(self, filename):
        """Write embeddings in word2vec text format: header, one node per line."""
        with open(filename, 'w') as fout:
            fout.write("{} {}\n".format(len(self.vectors), self.Kstep * self.dim))
            for node, vec in self.vectors.items():
                fout.write("{} {}\n".format(node, ' '.join(str(x) for x in vec)))

    def train(self):
        """Compute per-step representations and assemble ``self.vectors``."""
        self.adj = self.getAdjMat()
        self.node_size = self.adj.shape[0]
        self.Ak = np.matrix(np.identity(self.node_size))
        self.RepMat = np.zeros((self.node_size, int(self.dim * self.Kstep)))
        for i in range(self.Kstep):
            print('Kstep =', i)
            self.Ak = np.dot(self.Ak, self.adj)
            probTranMat = self.GetProbTranMat(self.Ak)
            Rk = self.GetRepUseSVD(probTranMat, 0.5)
            Rk = normalize(Rk, axis=1, norm='l2')
            self.RepMat[:, self.dim * i:self.dim * (i + 1)] = Rk[:, :]
        # Map embedding rows back to the original node ids.
        self.vectors = {}
        look_back = self.g.look_back_list
        for i, embedding in enumerate(self.RepMat):
            self.vectors[look_back[i]] = embedding
| 3,001 | 35.168675 | 79 | py |
BioNEV | BioNEV-master/src/bionev/OpenNE/hope.py | # -*- coding: utf-8 -*-
import networkx as nx
import numpy as np
import scipy.sparse.linalg as lg
__author__ = "Alan WANG"
__email__ = "alan1995wang@outlook.com"
class HOPE(object):
    """HOPE-style embeddings via SVD of a second-order proximity matrix.

    Factorizes S = M_g^{-1} M_l (here M_g = I and M_l = A·A) with a
    truncated SVD and concatenates the scaled left/right singular vectors,
    giving each node d/2 source plus d/2 target dimensions.
    """

    def __init__(self, graph, d):
        '''
        d: representation vector dimension
        '''
        self._d = d
        self._graph = graph.G
        self.g = graph
        self._node_num = graph.node_size
        self.learn_embedding()

    def learn_embedding(self):
        """Compute ``self._X``, the (n, d) embedding matrix."""
        nx_graph = self.g.G
        adjacency = nx.to_numpy_matrix(nx_graph)
        # M_g = I and M_l = A^2 give plain second-order proximity (the
        # Katz variant with a beta decay is left out upstream).
        m_global = np.eye(nx_graph.number_of_nodes())
        m_local = np.dot(adjacency, adjacency)
        proximity = np.dot(np.linalg.inv(m_global), m_local)
        # s holds the top d/2 singular values.
        u, s, vt = lg.svds(proximity, k=self._d // 2)
        sqrt_sigma = np.diagflat(np.sqrt(s))
        source_rep = np.dot(u, sqrt_sigma)
        target_rep = np.dot(vt.T, sqrt_sigma)
        self._X = np.concatenate((source_rep, target_rep), axis=1)

    @property
    def vectors(self):
        """Dict view mapping node id -> its embedding row of ``self._X``."""
        look_back = self.g.look_back_list
        return {look_back[i]: row for i, row in enumerate(self._X)}

    def save_embeddings(self, filename):
        """Write embeddings in word2vec text format."""
        with open(filename, 'w') as fout:
            fout.write("{} {}\n".format(len(self.vectors.keys()), self._d))
            for node, vec in self.vectors.items():
                fout.write("{} {}\n".format(node, ' '.join(str(x) for x in vec)))
| 1,605 | 25.766667 | 73 | py |
BioNEV | BioNEV-master/src/bionev/OpenNE/lap.py | # -*- coding: utf-8 -*-
import networkx as nx
import numpy as np
from scipy.sparse.linalg import eigsh
__author__ = "Wang Binlu"
__email__ = "wblmail@whu.edu.cn"
class LaplacianEigenmaps(object):
    """Laplacian Eigenmaps embeddings.

    Uses ``rep_size`` eigenvectors of the normalized graph Laplacian as
    node coordinates; results land in ``self.vectors``
    (node id -> embedding).
    """

    def __init__(self, graph, rep_size=128):
        self.g = graph
        self.node_size = self.g.G.number_of_nodes()
        self.rep_size = rep_size
        self.adj_mat = nx.to_numpy_array(self.g.G)
        self.vectors = {}
        self.embeddings = self.get_train()
        # Map eigenvector rows back to original node ids.
        look_back = self.g.look_back_list
        for i, embedding in enumerate(self.embeddings):
            self.vectors[look_back[i]] = embedding

    def getAdj(self):
        """Dense weighted adjacency in look_up order (not referenced elsewhere
        in this class; kept for API parity with the other embedding models)."""
        node_size = self.g.node_size
        look_up = self.g.look_up_dict
        adj = np.zeros((node_size, node_size))
        for edge in self.g.G.edges():
            adj[look_up[edge[0]]][look_up[edge[1]]] = self.g.G[edge[0]][edge[1]]['weight']
        return adj

    def getLap(self):
        """Return the sparse normalized Laplacian of the undirected graph."""
        # degree_mat = np.diagflat(np.sum(self.adj_mat, axis=1))
        # print('np.diagflat(np.sum(self.adj_mat, axis=1))')
        # deg_trans = np.diagflat(np.reciprocal(np.sqrt(np.sum(self.adj_mat, axis=1))))
        # print('np.diagflat(np.reciprocal(np.sqrt(np.sum(self.adj_mat, axis=1))))')
        # deg_trans = np.nan_to_num(deg_trans)
        # L = degree_mat-self.adj_mat
        # print('begin norm_lap_mat')
        # # eye = np.eye(self.node_size)
        #
        # norm_lap_mat = np.matmul(np.matmul(deg_trans, L), deg_trans)
        G = self.g.G.to_undirected()
        print('begin norm_lap_mat')
        norm_lap_mat = nx.normalized_laplacian_matrix(G)
        print('finish norm_lap_mat')
        return norm_lap_mat

    def get_train(self):
        """Eigendecompose the Laplacian and return the eigenvector matrix."""
        lap_mat = self.getLap()
        print('finish getLap...')
        # NOTE(review): eigsh defaults to which='LM' (largest-magnitude
        # eigenvalues); classical Laplacian Eigenmaps uses the smallest
        # nontrivial eigenvectors — confirm this is intended.
        w, vec = eigsh(lap_mat, k=self.rep_size)
        print('finish eigh(lap_mat)...')
        # start = 0
        # for i in range(self.node_size):
        #     if w[i] > 1e-10:
        #         start = i
        #         break
        # vec = vec[:, start:start+self.rep_size]
        return vec

    def save_embeddings(self, filename):
        """Write embeddings in word2vec text format: header, one node per line."""
        fout = open(filename, 'w')
        node_num = len(self.vectors)
        fout.write("{} {}\n".format(node_num, self.rep_size))
        for node, vec in self.vectors.items():
            fout.write("{} {}\n".format(node, ' '.join([str(x) for x in vec])))
        fout.close()
| 2,399 | 33.285714 | 90 | py |
BioNEV | BioNEV-master/src/bionev/OpenNE/line.py | # -*- coding: utf-8 -*-
import math
import random
import numpy as np
import tensorflow as tf
from sklearn.linear_model import LogisticRegression
from bionev.OpenNE.classify import Classifier, read_node_label
class _LINE(object):
def __init__(self, graph, rep_size=128, batch_size=1000, negative_ratio=5, order=3):
self.cur_epoch = 0
self.order = order
self.g = graph
self.node_size = graph.G.number_of_nodes()
self.rep_size = rep_size
self.batch_size = batch_size
self.negative_ratio = negative_ratio
self.gen_sampling_table()
self.sess = tf.Session()
cur_seed = random.getrandbits(32)
initializer = tf.contrib.layers.xavier_initializer(
uniform=False, seed=cur_seed)
with tf.variable_scope("model", reuse=None, initializer=initializer):
self.build_graph()
self.sess.run(tf.global_variables_initializer())
def build_graph(self):
self.h = tf.placeholder(tf.int32, [None])
self.t = tf.placeholder(tf.int32, [None])
self.sign = tf.placeholder(tf.float32, [None])
cur_seed = random.getrandbits(32)
self.embeddings = tf.get_variable(name="embeddings" + str(self.order), shape=[
self.node_size, self.rep_size], initializer=tf.contrib.layers.xavier_initializer(uniform=False,
seed=cur_seed))
self.context_embeddings = tf.get_variable(name="context_embeddings" + str(self.order), shape=[
self.node_size, self.rep_size], initializer=tf.contrib.layers.xavier_initializer(uniform=False,
seed=cur_seed))
# self.h_e = tf.nn.l2_normalize(tf.nn.embedding_lookup(self.embeddings, self.h), 1)
# self.t_e = tf.nn.l2_normalize(tf.nn.embedding_lookup(self.embeddings, self.t), 1)
# self.t_e_context = tf.nn.l2_normalize(tf.nn.embedding_lookup(self.context_embeddings, self.t), 1)
self.h_e = tf.nn.embedding_lookup(self.embeddings, self.h)
self.t_e = tf.nn.embedding_lookup(self.embeddings, self.t)
self.t_e_context = tf.nn.embedding_lookup(
self.context_embeddings, self.t)
self.second_loss = -tf.reduce_mean(tf.log_sigmoid(
self.sign * tf.reduce_sum(tf.multiply(self.h_e, self.t_e_context), axis=1)))
self.first_loss = -tf.reduce_mean(tf.log_sigmoid(
self.sign * tf.reduce_sum(tf.multiply(self.h_e, self.t_e), axis=1)))
if self.order == 1:
self.loss = self.first_loss
else:
self.loss = self.second_loss
optimizer = tf.train.AdamOptimizer(0.001)
self.train_op = optimizer.minimize(self.loss)
def train_one_epoch(self):
sum_loss = 0.0
batches = self.batch_iter()
batch_id = 0
for batch in batches:
h, t, sign = batch
feed_dict = {
self.h: h,
self.t: t,
self.sign: sign,
}
_, cur_loss = self.sess.run([self.train_op, self.loss], feed_dict)
sum_loss += cur_loss
batch_id += 1
print('epoch:{} sum of loss:{!s}'.format(self.cur_epoch, sum_loss))
self.cur_epoch += 1
def batch_iter(self):
look_up = self.g.look_up_dict
table_size = 1e8
numNodes = self.node_size
edges = [(look_up[x[0]], look_up[x[1]]) for x in self.g.G.edges()]
data_size = self.g.G.number_of_edges()
edge_set = set([x[0] * numNodes + x[1] for x in edges])
shuffle_indices = np.random.permutation(np.arange(data_size))
# positive or negative mod
mod = 0
mod_size = 1 + self.negative_ratio
h = []
t = []
sign = 0
start_index = 0
end_index = min(start_index + self.batch_size, data_size)
while start_index < data_size:
if mod == 0:
sign = 1.
h = []
t = []
for i in range(start_index, end_index):
if not random.random() < self.edge_prob[shuffle_indices[i]]:
shuffle_indices[i] = self.edge_alias[shuffle_indices[i]]
cur_h = edges[shuffle_indices[i]][0]
cur_t = edges[shuffle_indices[i]][1]
h.append(cur_h)
t.append(cur_t)
else:
sign = -1.
t = []
for i in range(len(h)):
t.append(
self.sampling_table[random.randint(0, table_size - 1)])
yield h, t, [sign]
mod += 1
mod %= mod_size
if mod == 0:
start_index = end_index
end_index = min(start_index + self.batch_size, data_size)
def gen_sampling_table(self):
table_size = 1e8
power = 0.75
numNodes = self.node_size
print("Pre-procesing for non-uniform negative sampling!")
node_degree = np.zeros(numNodes) # out degree
look_up = self.g.look_up_dict
for edge in self.g.G.edges():
node_degree[look_up[edge[0]]
] += self.g.G[edge[0]][edge[1]]["weight"]
norm = sum([math.pow(node_degree[i], power) for i in range(numNodes)])
self.sampling_table = np.zeros(int(table_size), dtype=np.uint32)
p = 0
i = 0
for j in range(numNodes):
p += float(math.pow(node_degree[j], power)) / norm
while i < table_size and float(i) / table_size < p:
self.sampling_table[i] = j
i += 1
data_size = self.g.G.number_of_edges()
self.edge_alias = np.zeros(data_size, dtype=np.int32)
self.edge_prob = np.zeros(data_size, dtype=np.float32)
large_block = np.zeros(data_size, dtype=np.int32)
small_block = np.zeros(data_size, dtype=np.int32)
total_sum = sum([self.g.G[edge[0]][edge[1]]["weight"]
for edge in self.g.G.edges()])
norm_prob = [self.g.G[edge[0]][edge[1]]["weight"] *
data_size / total_sum for edge in self.g.G.edges()]
num_small_block = 0
num_large_block = 0
cur_small_block = 0
cur_large_block = 0
for k in range(data_size - 1, -1, -1):
if norm_prob[k] < 1:
small_block[num_small_block] = k
num_small_block += 1
else:
large_block[num_large_block] = k
num_large_block += 1
while num_small_block and num_large_block:
num_small_block -= 1
cur_small_block = small_block[num_small_block]
num_large_block -= 1
cur_large_block = large_block[num_large_block]
self.edge_prob[cur_small_block] = norm_prob[cur_small_block]
self.edge_alias[cur_small_block] = cur_large_block
norm_prob[cur_large_block] = norm_prob[cur_large_block] + \
norm_prob[cur_small_block] - 1
if norm_prob[cur_large_block] < 1:
small_block[num_small_block] = cur_large_block
num_small_block += 1
else:
large_block[num_large_block] = cur_large_block
num_large_block += 1
while num_large_block:
num_large_block -= 1
self.edge_prob[large_block[num_large_block]] = 1
while num_small_block:
num_small_block -= 1
self.edge_prob[small_block[num_small_block]] = 1
    def get_embeddings(self):
        """Fetch the learned embedding matrix from the TF session and map it
        back to original node ids.

        :return: dict {node_id: embedding vector (1-D numpy array)}
        """
        vectors = {}
        # Evaluate the embedding variable in this model's session.
        embeddings = self.embeddings.eval(session=self.sess)
        # embeddings = self.sess.run(tf.nn.l2_normalize(self.embeddings.eval(session=self.sess), 1))
        # look_back_list maps row index -> original node id.
        look_back = self.g.look_back_list
        for i, embedding in enumerate(embeddings):
            vectors[look_back[i]] = embedding
        return vectors
class LINE(object):
    """High-level LINE trainer (first-, second-, or combined-order).

    order=1 / order=2 trains a single ``_LINE`` model of that proximity
    order; order=3 trains one model per order, each with half the
    embedding dimensions, and concatenates their vectors per node.

    When ``label_file`` is given, a node classifier is trained after each
    epoch and (with ``auto_save``) the best-scoring embeddings are
    restored at the end of training.
    """
    def __init__(self, graph, rep_size=128, batch_size=1000, epoch=10, negative_ratio=5, order=3, label_file=None,
                 clf_ratio=0.5, auto_save=True):
        self.rep_size = rep_size
        self.order = order
        self.best_result = 0
        self.vectors = {}
        if order == 3:
            # Half of the dimensions per proximity order. Integer division:
            # ``rep_size / 2`` yields a float on Python 3, which is not a
            # valid embedding-matrix dimension.
            self.model1 = _LINE(graph, rep_size // 2, batch_size,
                                negative_ratio, order=1)
            self.model2 = _LINE(graph, rep_size // 2, batch_size,
                                negative_ratio, order=2)
            for i in range(epoch):
                self.model1.train_one_epoch()
                self.model2.train_one_epoch()
                self._evaluate_epoch(label_file, clf_ratio, auto_save)
        else:
            self.model = _LINE(graph, rep_size, batch_size,
                               negative_ratio, order=self.order)
            for i in range(epoch):
                self.model.train_one_epoch()
                self._evaluate_epoch(label_file, clf_ratio, auto_save)
        self.get_embeddings()
        if auto_save and label_file:
            self.vectors = self.best_vector
    def _evaluate_epoch(self, label_file, clf_ratio, auto_save):
        """Evaluate current embeddings with a classifier; remember the best.
        No-op when no ``label_file`` is provided."""
        if not label_file:
            return
        self.get_embeddings()
        X, Y = read_node_label(label_file)
        print("Training classifier using {:.2f}% nodes...".format(
            clf_ratio * 100))
        clf = Classifier(vectors=self.vectors,
                        clf=LogisticRegression())
        result = clf.split_train_evaluate(X, Y, clf_ratio)
        if result['macro'] > self.best_result:
            self.best_result = result['macro']
            if auto_save:
                self.best_vector = self.vectors
    def get_embeddings(self):
        """Refresh ``self.vectors`` from the trained model(s); for order=3
        the two half-size embeddings are concatenated per node."""
        self.last_vectors = self.vectors
        self.vectors = {}
        if self.order == 3:
            vectors1 = self.model1.get_embeddings()
            vectors2 = self.model2.get_embeddings()
            for node in vectors1.keys():
                self.vectors[node] = np.append(vectors1[node], vectors2[node])
        else:
            self.vectors = self.model.get_embeddings()
    def save_embeddings(self, filename):
        """Write embeddings in word2vec text format: a "<n> <dim>" header
        line followed by one "<node> <vec...>" line per node."""
        # 'with' guarantees the file is closed even if a write fails.
        with open(filename, 'w') as fout:
            fout.write("{} {}\n".format(len(self.vectors), self.rep_size))
            for node, vec in self.vectors.items():
                fout.write("{} {}\n".format(node,
                                            ' '.join([str(x) for x in vec])))
| 11,252 | 39.478417 | 114 | py |
BioNEV | BioNEV-master/src/bionev/OpenNE/node2vec.py | # -*- coding: utf-8 -*-
from gensim.models import Word2Vec
from bionev.OpenNE import walker
class Node2vec(object):
    """node2vec / DeepWalk embedding wrapper.

    Simulates (possibly biased) random walks over ``graph`` and feeds them
    as sentences to gensim's Word2Vec (skip-gram) to learn node vectors.

    :param graph: graph wrapper exposing ``.G`` (a networkx graph)
    :param path_length: length of each random walk
    :param num_paths: number of walks started from every node
    :param dim: embedding dimensionality (unless overridden via kwargs['size'])
    :param p: node2vec return parameter (ignored when dw=True)
    :param q: node2vec in-out parameter (ignored when dw=True)
    :param dw: if True, run plain DeepWalk (uniform walks, hierarchical softmax)
    :param kwargs: extra options forwarded to gensim Word2Vec
    """
    def __init__(self, graph, path_length, num_paths, dim, p=1.0, q=1.0, dw=False, **kwargs):
        kwargs["workers"] = kwargs.get("workers", 1)
        if dw:
            # DeepWalk: hierarchical softmax on; p=q=1 makes the biased
            # second-order walk reduce to a uniform first-order walk.
            kwargs["hs"] = 1
            p = 1.0
            q = 1.0
        self.graph = graph
        if dw:
            self.walker = walker.BasicWalker(graph, workers=kwargs["workers"])
        else:
            # The biased walker needs its alias tables built before walking.
            self.walker = walker.Walker(
                graph, p=p, q=q, workers=kwargs["workers"])
            print("Preprocess transition probs...")
            self.walker.preprocess_transition_probs()
        sentences = self.walker.simulate_walks(
            num_walks=num_paths, walk_length=path_length)
        # Each walk is a "sentence"; sg=1 selects the skip-gram model.
        kwargs["sentences"] = sentences
        kwargs["min_count"] = kwargs.get("min_count", 0)
        kwargs["size"] = kwargs.get("size", dim)
        kwargs["sg"] = 1
        self.size = kwargs["size"]
        print("Learning representation...")
        word2vec = Word2Vec(**kwargs)
        self.vectors = {}
        for word in graph.G.nodes():
            self.vectors[word] = word2vec.wv[word]
        del word2vec  # free the full gensim model; only the vectors are kept
    def save_embeddings(self, filename):
        """Write embeddings in word2vec text format: a "<n> <dim>" header
        line followed by one "<node> <vec...>" line per node."""
        fout = open(filename, 'w')
        node_num = len(self.vectors.keys())
        fout.write("{} {}\n".format(node_num, self.size))
        for node, vec in self.vectors.items():
            fout.write("{} {}\n".format(node,
                                        ' '.join([str(x) for x in vec])))
        fout.close()
| 1,593 | 31.530612 | 93 | py |
BioNEV | BioNEV-master/src/bionev/OpenNE/sdne.py | # -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
__author__ = "Wang Binlu"
__email__ = "wblmail@whu.edu.cn"
def fc_op(input_op, name, n_out, layer_collector, act_func=tf.nn.leaky_relu):
    """Fully-connected layer: ``act_func(input_op @ W + b)`` (TF1 graph mode).

    :param input_op: input tensor; its last dimension is the fan-in
    :param name: scope/name prefix for the created variables
    :param n_out: number of output units
    :param layer_collector: list collecting ``[kernel, biases]`` pairs so the
        caller can build regularization terms over all layer weights
    :param act_func: activation applied to the affine output
    :return: the activated output tensor
    """
    n_in = input_op.get_shape()[-1].value
    with tf.name_scope(name) as scope:
        # Xavier-initialized weight matrix.
        kernel = tf.Variable(tf.contrib.layers.xavier_initializer()([n_in, n_out]), dtype=tf.float32, name=scope + "w")
        # kernel = tf.Variable(tf.random_normal([n_in, n_out]))
        biases = tf.Variable(tf.constant(0, shape=[1, n_out], dtype=tf.float32), name=scope + 'b')
        fc = tf.add(tf.matmul(input_op, kernel), biases)
        activation = act_func(fc, name=scope + 'act')
        layer_collector.append([kernel, biases])
        return activation
class SDNE(object):
    """Structural Deep Network Embedding (TF1 implementation).

    An autoencoder over adjacency rows: the reconstruction loss (2nd-order
    proximity, non-zero entries up-weighted by ``beta``) is combined with a
    Laplacian-style loss pulling connected nodes' codes together (1st-order
    proximity, weighted by ``alpha``), plus L1/L2 weight regularization
    (``nu1``/``nu2``). The encoder's middle layer is the node embedding.
    """
    def __init__(self, graph, encoder_layer_list, alpha=1e-6, beta=5., nu1=1e-5, nu2=1e-4,
                 batch_size=200, epoch=100, learning_rate=None):
        """
        encoder_layer_list: a list of the number of neurons at each encoder layer; the last number is the
        dimension of the output node representation
        Eg:
        if node size is 2000, encoder_layer_list=[1000, 128], then the whole neural network would be
        2000(input)->1000->128->1000->2000, SDNE extracts the middle layer as the node representation
        """
        self.g = graph
        self.node_size = self.g.G.number_of_nodes()
        self.dim = encoder_layer_list[-1]
        # Prepend the input width so the list describes the full encoder.
        self.encoder_layer_list = [self.node_size]
        self.encoder_layer_list.extend(encoder_layer_list)
        self.encoder_layer_num = len(encoder_layer_list) + 1
        self.alpha = alpha
        self.beta = beta
        self.nu1 = nu1
        self.nu2 = nu2
        self.bs = batch_size
        self.epoch = epoch
        # One "epoch" visits roughly every node once in random batches.
        self.max_iter = (epoch * self.node_size) // batch_size
        self.lr = learning_rate
        if self.lr is None:
            self.lr = tf.train.inverse_time_decay(0.03, self.max_iter, decay_steps=1, decay_rate=0.9999)
        self.sess = tf.Session()
        self.vectors = {}
        self.adj_mat = self.getAdj()
        self.embeddings = self.train()
        look_back = self.g.look_back_list
        for i, embedding in enumerate(self.embeddings):
            self.vectors[look_back[i]] = embedding
    def getAdj(self):
        """Build the dense weighted adjacency matrix from the graph."""
        node_size = self.g.node_size
        look_up = self.g.look_up_dict
        adj = np.zeros((node_size, node_size))
        for edge in self.g.G.edges():
            adj[look_up[edge[0]]][look_up[edge[1]]] = self.g.G[edge[0]][edge[1]]['weight']
        return adj
    def train(self):
        """Build the autoencoder graph, train it, and return the embeddings
        (the encoder output evaluated on the full adjacency matrix)."""
        adj_mat = self.adj_mat
        # AdjBatch: adjacency rows of the sampled nodes (autoencoder input);
        # Adj: batch-internal adjacency for the 1st-order loss;
        # B: per-entry reconstruction weights (beta on non-zero entries).
        AdjBatch = tf.placeholder(tf.float32, [None, self.node_size], name='adj_batch')
        Adj = tf.placeholder(tf.float32, [None, None], name='adj_mat')
        B = tf.placeholder(tf.float32, [None, self.node_size], name='b_mat')
        fc = AdjBatch
        scope_name = 'encoder'
        layer_collector = []
        with tf.name_scope(scope_name):
            for i in range(1, self.encoder_layer_num):
                fc = fc_op(fc,
                           name=scope_name + str(i),
                           n_out=self.encoder_layer_list[i],
                           layer_collector=layer_collector)
        _embeddings = fc
        scope_name = 'decoder'
        with tf.name_scope(scope_name):
            for i in range(self.encoder_layer_num - 2, 0, -1):
                fc = fc_op(fc,
                           name=scope_name + str(i),
                           n_out=self.encoder_layer_list[i],
                           layer_collector=layer_collector)
            fc = fc_op(fc,
                       name=scope_name + str(0),
                       n_out=self.encoder_layer_list[0],
                       layer_collector=layer_collector, )
        # 1st-order loss: adjacency-weighted squared distances between codes
        # (||zi||^2 - 2 zi.zj + ||zj||^2 expansion).
        _embeddings_norm = tf.reduce_sum(tf.square(_embeddings), 1, keepdims=True)
        L_1st = tf.reduce_sum(
            Adj * (
                    _embeddings_norm - 2 * tf.matmul(
                _embeddings, tf.transpose(_embeddings)
            ) + tf.transpose(_embeddings_norm)
            )
        )
        # 2nd-order loss: weighted reconstruction error of adjacency rows.
        L_2nd = tf.reduce_sum(tf.square((AdjBatch - fc) * B))
        L = L_2nd + self.alpha * L_1st
        # L1/L2 regularization on every layer's kernel.
        for param in layer_collector:
            L += self.nu1 * tf.reduce_sum(tf.abs(param[0])) + self.nu2 * tf.reduce_sum(tf.square(param[0]))
        optimizer = tf.train.AdamOptimizer(self.lr)
        train_op = optimizer.minimize(L)
        init = tf.global_variables_initializer()
        self.sess.run(init)
        print("total iter: %i" % self.max_iter)
        for step in range(self.max_iter):
            # Random node batch; B up-weights observed edges by beta.
            index = np.random.randint(self.node_size, size=self.bs)
            adj_batch_train = adj_mat[index, :]
            adj_mat_train = adj_batch_train[:, index]
            b_mat_train = np.ones_like(adj_batch_train)
            b_mat_train[adj_batch_train != 0] = self.beta
            self.sess.run(train_op, feed_dict={AdjBatch: adj_batch_train,
                                               Adj: adj_mat_train,
                                               B: b_mat_train})
            if step % 50 == 0:
                l, l1, l2 = self.sess.run((L, L_1st, L_2nd),
                                          feed_dict={AdjBatch: adj_batch_train,
                                                     Adj: adj_mat_train,
                                                     B: b_mat_train})
                print("step %i: total loss: %s, l1 loss: %s, l2 loss: %s" % (step, l, l1, l2))
        return self.sess.run(_embeddings, feed_dict={AdjBatch: adj_mat})
    def save_embeddings(self, filename):
        """Write embeddings in word2vec text format: a "<n> <dim>" header
        line followed by one "<node> <vec...>" line per node."""
        fout = open(filename, 'w')
        node_num = len(self.vectors)
        fout.write("{} {}\n".format(node_num, self.dim))
        for node, vec in self.vectors.items():
            fout.write("{} {}\n".format(node, ' '.join([str(x) for x in vec])))
        fout.close()
class SDNE2(object):
    """Edge-batched SDNE variant.

    Instead of sampling node batches, each training batch consists of the
    endpoint adjacency rows of sampled edges; the 1st-order loss is the
    edge-weighted squared distance between the two endpoints' codes, and
    the 2nd-order loss reconstructs both endpoints' adjacency rows
    (non-zero entries up-weighted by ``beta``).
    """
    def __init__(self, graph, encoder_layer_list, alpha=1e-6, beta=5., nu1=1e-5, nu2=1e-5,
                 batch_size=100, max_iter=2000, learning_rate=None):
        self.g = graph
        self.node_size = self.g.G.number_of_nodes()
        self.rep_size = encoder_layer_list[-1]
        # Prepend the input width so the list describes the full encoder.
        self.encoder_layer_list = [self.node_size] + encoder_layer_list
        self.encoder_layer_num = len(encoder_layer_list) + 1
        self.alpha = alpha
        self.beta = beta
        self.nu1 = nu1
        self.nu2 = nu2
        self.bs = batch_size
        self.max_iter = max_iter
        self.lr = learning_rate
        if self.lr is None:
            self.lr = tf.train.inverse_time_decay(0.1, self.max_iter, decay_steps=1, decay_rate=0.9999)
        self.sess = tf.Session()
        self.vectors = {}
        self.adj_mat = self.getAdj()
        self.deg_vec = np.sum(self.adj_mat, axis=1)
        self.embeddings = self.get_train()
        look_back = self.g.look_back_list
        for i, embedding in enumerate(self.embeddings):
            self.vectors[look_back[i]] = embedding
    def getAdj(self):
        """Build the dense weighted adjacency matrix from the graph."""
        node_size = self.g.node_size
        look_up = self.g.look_up_dict
        adj = np.zeros((node_size, node_size))
        for edge in self.g.G.edges():
            adj[look_up[edge[0]]][look_up[edge[1]]] = self.g.G[edge[0]][edge[1]]['weight']
        return adj
    def model(self, node, layer_collector, scope_name):
        """Build encoder+decoder over ``node``; return (embedding, reconstruction)."""
        fc = node
        with tf.name_scope(scope_name + 'encoder'):
            for i in range(1, self.encoder_layer_num):
                fc = fc_op(fc,
                           name=scope_name + str(i),
                           n_out=self.encoder_layer_list[i],
                           layer_collector=layer_collector)
        _embeddings = fc
        with tf.name_scope(scope_name + 'decoder'):
            for i in range(self.encoder_layer_num - 2, -1, -1):
                fc = fc_op(fc,
                           name=scope_name + str(i),
                           n_out=self.encoder_layer_list[i],
                           layer_collector=layer_collector)
        return _embeddings, fc
    def generate_batch(self, shuffle=True):
        """Infinite generator over edge batches.

        Yields (nodes_a, nodes_b, beta_mask_a, beta_mask_b, weights): the
        adjacency rows of both endpoints of each sampled edge, the
        corresponding beta reconstruction masks, and the edge weights.
        Reshuffles after each full pass over the edges.
        """
        adj = self.adj_mat
        row_indices, col_indices = adj.nonzero()
        sample_index = np.arange(row_indices.shape[0])
        num_of_batches = row_indices.shape[0] // self.bs
        counter = 0
        if shuffle:
            np.random.shuffle(sample_index)
        while True:
            batch_index = sample_index[self.bs * counter:self.bs * (counter + 1)]
            nodes_a = adj[row_indices[batch_index], :]
            nodes_b = adj[col_indices[batch_index], :]
            weights = adj[row_indices[batch_index], col_indices[batch_index]]
            weights = np.reshape(weights, [-1, 1])
            # Observed edges get beta weight in the reconstruction loss.
            beta_mask_a = np.ones_like(nodes_a)
            beta_mask_a[nodes_a != 0] = self.beta
            beta_mask_b = np.ones_like(nodes_b)
            beta_mask_b[nodes_b != 0] = self.beta
            if counter == num_of_batches:
                counter = 0
                np.random.shuffle(sample_index)
            else:
                counter += 1
            yield (nodes_a, nodes_b, beta_mask_a, beta_mask_b, weights)
    def get_train(self):
        """Build the graph, train on edge batches, and return the embeddings
        of all nodes (encoder output on the full adjacency matrix)."""
        NodeA = tf.placeholder(tf.float32, [None, self.node_size], name='node_a')
        BmaskA = tf.placeholder(tf.float32, [None, self.node_size], name='beta_mask_a')
        NodeB = tf.placeholder(tf.float32, [None, self.node_size], name='node_b')
        BmaskB = tf.placeholder(tf.float32, [None, self.node_size], name='beta_mask_b')
        Weights = tf.placeholder(tf.float32, [None, 1], name='adj_weights')
        layer_collector = []
        # Both endpoints go through the same (shared) autoencoder.
        nodes = tf.concat([NodeA, NodeB], axis=0)
        bmasks = tf.concat([BmaskA, BmaskB], axis=0)
        emb, recons = self.model(nodes, layer_collector, 'reconstructor')
        embs = tf.split(emb, num_or_size_splits=2, axis=0)
        # 1st-order: edge-weighted squared distance between endpoint codes.
        L_1st = tf.reduce_sum(Weights * (tf.reduce_sum(tf.square(embs[0] - embs[1]), axis=1)))
        # 2nd-order: weighted reconstruction of both adjacency rows.
        L_2nd = tf.reduce_sum(tf.square((nodes - recons) * bmasks))
        L = L_2nd + self.alpha * L_1st
        for param in layer_collector:
            L += self.nu1 * tf.reduce_sum(tf.abs(param[0])) + self.nu2 * tf.reduce_sum(tf.square(param[0]))
        # lr = tf.train.exponential_decay(1e-6, self.max_iter, decay_steps=1, decay_rate=0.9999)
        # optimizer = tf.train.MomentumOptimizer(lr, 0.99, use_nesterov=True)
        optimizer = tf.train.AdamOptimizer(self.lr)
        train_op = optimizer.minimize(L)
        init = tf.global_variables_initializer()
        self.sess.run(init)
        generator = self.generate_batch()
        for step in range(self.max_iter + 1):
            nodes_a, nodes_b, beta_mask_a, beta_mask_b, weights = generator.__next__()
            feed_dict = {NodeA: nodes_a,
                         NodeB: nodes_b,
                         BmaskA: beta_mask_a,
                         BmaskB: beta_mask_b,
                         Weights: weights}
            self.sess.run(train_op, feed_dict=feed_dict)
            if step % 50 == 0:
                print("step %i: %s" % (step, self.sess.run([L, L_1st, L_2nd], feed_dict=feed_dict)))
        return self.sess.run(emb, feed_dict={NodeA: self.adj_mat[0:1, :], NodeB: self.adj_mat[1:, :]})
    def save_embeddings(self, filename):
        """Write embeddings in word2vec text format: a "<n> <dim>" header
        line followed by one "<node> <vec...>" line per node."""
        fout = open(filename, 'w')
        node_num = len(self.vectors)
        fout.write("{} {}\n".format(node_num, self.rep_size))
        for node, vec in self.vectors.items():
            fout.write("{} {}\n".format(node, ' '.join([str(x) for x in vec])))
        fout.close()
| 11,631 | 36.766234 | 119 | py |
BioNEV | BioNEV-master/src/bionev/OpenNE/walker.py | # -*- coding: utf-8 -*-
import random
import numpy as np
def deepwalk_walk_wrapper(class_instance, walk_length, start_node):
    """Module-level wrapper around ``deepwalk_walk`` so the call can be
    pickled and dispatched to a multiprocessing pool (bound methods cannot
    always be pickled directly).

    Returns the generated walk; the original discarded it, which made the
    wrapper useless for collecting results from a pool.
    """
    return class_instance.deepwalk_walk(walk_length, start_node)
class BasicWalker:
    """Uniform (DeepWalk-style) random walker over a graph wrapper."""
    def __init__(self, G, workers):
        # G is a graph wrapper; G.G is the underlying networkx graph.
        self.G = G.G
        self.node_size = G.node_size
        self.look_up_dict = G.look_up_dict
    def deepwalk_walk(self, walk_length, start_node):
        '''
        Simulate a uniform random walk starting from start_node.
        Stops early if a node without neighbors is reached.
        '''
        G = self.G
        look_up_dict = self.look_up_dict
        node_size = self.node_size
        walk = [start_node]
        while len(walk) < walk_length:
            cur = walk[-1]
            cur_nbrs = list(G.neighbors(cur))
            if len(cur_nbrs) > 0:
                walk.append(random.choice(cur_nbrs))
            else:
                break
        return walk
    def simulate_walks(self, num_walks, walk_length):
        '''
        Repeatedly simulate random walks from each node:
        num_walks passes, each visiting every node once in shuffled order.
        Returns a list of walks (lists of nodes).
        '''
        G = self.G
        walks = []
        nodes = list(G.nodes())
        print('Begin random walks...')
        for walk_iter in range(num_walks):
            # pool = multiprocessing.Pool(processes = 4)
            # print(str(walk_iter+1), '/', str(num_walks))
            random.shuffle(nodes)
            for node in nodes:
                # walks.append(pool.apply_async(deepwalk_walk_wrapper, (self, walk_length, node, )))
                walks.append(self.deepwalk_walk(
                    walk_length=walk_length, start_node=node))
            # pool.close()
            # pool.join()
        # print(len(walks))
        print('Walk finished...')
        return walks
class Walker:
    """node2vec biased random walker.

    ``p`` (return parameter) and ``q`` (in-out parameter) bias the
    second-order walk; alias tables for nodes and edges are precomputed by
    ``preprocess_transition_probs`` and must exist before walking.
    """
    def __init__(self, G, p, q, workers):
        # G is a graph wrapper; G.G is the underlying networkx graph.
        self.G = G.G
        self.p = p
        self.q = q
        self.node_size = G.node_size
        self.look_up_dict = G.look_up_dict
    def node2vec_walk(self, walk_length, start_node):
        '''
        Simulate a biased random walk starting from start_node.
        The first step uses the node alias table; subsequent steps use the
        (prev, cur) edge alias table. Stops early at a node with no neighbors.
        '''
        G = self.G
        alias_nodes = self.alias_nodes
        alias_edges = self.alias_edges
        look_up_dict = self.look_up_dict
        node_size = self.node_size
        walk = [start_node]
        while len(walk) < walk_length:
            cur = walk[-1]
            cur_nbrs = list(G.neighbors(cur))
            if len(cur_nbrs) > 0:
                if len(walk) == 1:
                    walk.append(
                        cur_nbrs[alias_draw(alias_nodes[cur][0], alias_nodes[cur][1])])
                else:
                    prev = walk[-2]
                    pos = (prev, cur)
                    next = cur_nbrs[alias_draw(alias_edges[pos][0],
                                               alias_edges[pos][1])]
                    walk.append(next)
            else:
                break
        return walk
    def simulate_walks(self, num_walks, walk_length):
        '''
        Repeatedly simulate random walks from each node:
        num_walks passes, each visiting every node once in shuffled order.
        '''
        G = self.G
        walks = []
        nodes = list(G.nodes())
        print('Begin random walk...')
        for walk_iter in range(num_walks):
            # print(str(walk_iter+1), '/', str(num_walks))
            random.shuffle(nodes)
            for node in nodes:
                walks.append(self.node2vec_walk(
                    walk_length=walk_length, start_node=node))
        print('Walk finished...')
        return walks
    def get_alias_edge(self, src, dst):
        '''
        Get the alias sampling tables for the transition out of edge
        (src, dst): returning to src is weighted by 1/p, staying within
        distance 1 of src keeps the edge weight, and moving away is
        weighted by 1/q.
        '''
        G = self.G
        p = self.p
        q = self.q
        unnormalized_probs = []
        for dst_nbr in G.neighbors(dst):
            if dst_nbr == src:
                unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p)
            elif G.has_edge(dst_nbr, src):
                unnormalized_probs.append(G[dst][dst_nbr]['weight'])
            else:
                unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q)
        norm_const = sum(unnormalized_probs)
        normalized_probs = [
            float(u_prob) / norm_const for u_prob in unnormalized_probs]
        return alias_setup(normalized_probs)
    def preprocess_transition_probs(self):
        '''
        Precompute the alias tables for nodes (first step of a walk) and
        for directed edges (all subsequent steps).
        '''
        G = self.G
        alias_nodes = {}
        for node in G.nodes():
            unnormalized_probs = [G[node][nbr]['weight']
                                  for nbr in G.neighbors(node)]
            norm_const = sum(unnormalized_probs)
            normalized_probs = [
                float(u_prob) / norm_const for u_prob in unnormalized_probs]
            alias_nodes[node] = alias_setup(normalized_probs)
        alias_edges = {}
        triads = {}
        look_up_dict = self.look_up_dict
        node_size = self.node_size
        for edge in G.edges():
            alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
        self.alias_nodes = alias_nodes
        self.alias_edges = alias_edges
        return
def alias_setup(probs):
    '''
    Compute utility lists for non-uniform sampling from discrete distributions.
    Given a probability vector ``probs``, returns the alias table J and the
    probability table q used by alias_draw for O(1) sampling.
    Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    for details
    '''
    n = len(probs)
    prob_table = np.zeros(n, dtype=np.float32)
    alias_table = np.zeros(n, dtype=np.int32)
    # Scale each probability by n and bucket entries by whether they are
    # under- or over-full relative to 1.
    under = []
    over = []
    for idx, p in enumerate(probs):
        prob_table[idx] = n * p
        if prob_table[idx] < 1.0:
            under.append(idx)
        else:
            over.append(idx)
    # Repeatedly pair one under-full entry with one over-full entry: the
    # over-full entry donates mass and becomes the under-full one's alias.
    while under and over:
        u = under.pop()
        o = over.pop()
        alias_table[u] = o
        prob_table[o] = prob_table[o] + prob_table[u] - 1.0
        if prob_table[o] < 1.0:
            under.append(o)
        else:
            over.append(o)
    return alias_table, prob_table
def alias_draw(J, q):
    '''
    Draw a sample from a non-uniform discrete distribution using the alias
    tables produced by alias_setup: pick a column uniformly, then return
    either that column or its alias depending on a second coin flip.
    '''
    n = len(J)
    col = int(np.floor(np.random.rand() * n))
    return col if np.random.rand() < q[col] else J[col]
| 6,186 | 28.461905 | 123 | py |
BioNEV | BioNEV-master/src/bionev/SVD/__init__.py | 0 | 0 | 0 | py | |
BioNEV | BioNEV-master/src/bionev/SVD/model.py | import networkx as nx
import numpy as np
from scipy.sparse.linalg import svds
def SVD_embedding(G, output_filename, size=100):
    """Compute SVD-based node embeddings of ``G`` and write them to a file.

    Factorizes the (weighted) adjacency matrix A ~ U * Sigma * V^T with a
    truncated SVD of rank ``size``, forms the source/target factors
    W = U * sqrt(Sigma) and C = V * sqrt(Sigma), and uses W + C as the
    final embedding of each node.

    :param G: networkx graph whose adjacency matrix is factorized
    :param output_filename: path of the word2vec-style output file
        ("<num_nodes> <size>" header, then one "<node> <vec...>" line per node)
    :param size: embedding dimensionality (number of singular values kept)
    """
    node_list = list(G.nodes())
    adjacency_matrix = nx.adjacency_matrix(G, node_list)
    adjacency_matrix = adjacency_matrix.astype(float)
    # adjacency_matrix = sparse.csc_matrix(adjacency_matrix)
    U, Sigma, VT = svds(adjacency_matrix, k=size)
    Sigma = np.diag(Sigma)
    W = np.matmul(U, np.sqrt(Sigma))
    C = np.matmul(VT.T, np.sqrt(Sigma))
    # Source and target factors are summed into a single representation.
    embeddings = W + C
    vectors = {}
    # 'idx' rather than the original 'id' to avoid shadowing the builtin.
    for idx, node in enumerate(node_list):
        vectors[node] = list(np.array(embeddings[idx]))
    # 'with' guarantees the file is closed even if a write fails.
    with open(output_filename, 'w') as fout:
        fout.write("{} {}\n".format(len(vectors), size))
        for node, vec in vectors.items():
            fout.write("{} {}\n".format(node,
                                        ' '.join([str(x) for x in vec])))
    return
| 949 | 30.666667 | 69 | py |
BioNEV | BioNEV-master/src/bionev/struc2vec/__init__.py | # -*- coding: utf-8 -*-
| 24 | 11.5 | 23 | py |
BioNEV | BioNEV-master/src/bionev/struc2vec/algorithms.py | # -*- coding: utf-8 -*-
import math
import random
from collections import deque
from concurrent.futures import ProcessPoolExecutor, as_completed
import numpy as np
from bionev.struc2vec.utils import *
def generate_parameters_random_walk(workers):
    """Precompute the statistics that guide the layered random walks.

    Reads the per-layer edge-weight pickles produced earlier, computes the
    average edge weight of every layer ('average_weight') and, per layer
    and vertex, the number of neighbours whose edge weight exceeds that
    average ('amount_neighbours'). Both results are pickled back to disk.
    ``workers`` is accepted for API symmetry but not used here.
    """
    logging.info('Loading distances_nets from disk...')
    sum_weights = {}
    amount_edges = {}
    layer = 0
    # Layers are stored as consecutive pickles; iterate until one is missing.
    while (isPickle('distances_nets_weights-layer-' + str(layer))):
        logging.info('Executing layer {}...'.format(layer))
        weights = restoreVariableFromDisk('distances_nets_weights-layer-' + str(layer))
        for k, list_weights in weights.items():
            if (layer not in sum_weights):
                sum_weights[layer] = 0
            if (layer not in amount_edges):
                amount_edges[layer] = 0
            for w in list_weights:
                sum_weights[layer] += w
                amount_edges[layer] += 1
        logging.info('Layer {} executed.'.format(layer))
        layer += 1
    # Mean edge weight per layer.
    average_weight = {}
    for layer in sum_weights.keys():
        average_weight[layer] = sum_weights[layer] / amount_edges[layer]
    logging.info("Saving average_weights on disk...")
    saveVariableOnDisk(average_weight, 'average_weight')
    # Second pass: per vertex, count neighbours above the layer average.
    amount_neighbours = {}
    layer = 0
    while (isPickle('distances_nets_weights-layer-' + str(layer))):
        logging.info('Executing layer {}...'.format(layer))
        weights = restoreVariableFromDisk('distances_nets_weights-layer-' + str(layer))
        amount_neighbours[layer] = {}
        for k, list_weights in weights.items():
            cont_neighbours = 0
            for w in list_weights:
                if (w > average_weight[layer]):
                    cont_neighbours += 1
            amount_neighbours[layer][k] = cont_neighbours
        logging.info('Layer {} executed.'.format(layer))
        layer += 1
    logging.info("Saving amount_neighbours on disk...")
    saveVariableOnDisk(amount_neighbours, 'amount_neighbours')
def chooseNeighbor(v, graphs, alias_method_j, alias_method_q, layer):
    """Pick the next vertex among ``v``'s neighbours on the given layer,
    sampling with the layer's precomputed alias tables for ``v``."""
    neighbours = graphs[layer][v]
    drawn = alias_draw(alias_method_j[layer][v], alias_method_q[layer][v])
    return neighbours[drawn]
def exec_random_walk(graphs, alias_method_j, alias_method_q, v, walk_length, amount_neighbours):
    """Run one struc2vec random walk of ``walk_length`` vertices starting at ``v``.

    With probability 0.3 the walk steps to a structurally similar vertex
    inside the current layer (alias sampling); otherwise it may move one
    layer up or down without changing vertex, where the probability of
    moving up grows with the number of similar-enough neighbours on the
    current layer (see ``prob_moveup``). Returns the walk as a deque.
    """
    original_v = v
    t0 = time()
    initialLayer = 0
    layer = initialLayer
    path = deque()
    path.append(v)
    while len(path) < walk_length:
        r = random.random()
        if (r < 0.3):
            # Stay on this layer and move to a similar vertex.
            v = chooseNeighbor(v, graphs, alias_method_j, alias_method_q, layer)
            path.append(v)
        else:
            # Change layer only; the vertex (and the path) do not advance.
            r = random.random()
            limiar_moveup = prob_moveup(amount_neighbours[layer][v])
            if (r > limiar_moveup):
                if (layer > initialLayer):
                    layer = layer - 1
            else:
                # Move up only if the vertex exists on the next layer.
                if ((layer + 1) in graphs and v in graphs[layer + 1]):
                    layer = layer + 1
    t1 = time()
    logging.info('RW - vertex {}. Time : {}s'.format(original_v, (t1 - t0)))
    return path
def exec_ramdom_walks_for_chunck(vertices, graphs, alias_method_j, alias_method_q, walk_length, amount_neighbours):
    """Run one random walk per start vertex in ``vertices`` (in order) and
    return the walks collected in a deque."""
    return deque(
        exec_random_walk(graphs, alias_method_j, alias_method_q, v, walk_length, amount_neighbours)
        for v in vertices
    )
def generate_random_walks_large_graphs(num_walks, walk_length, workers, vertices):
    """Generate ``num_walks`` walks per vertex and save them to disk
    ('random_walks.txt'), loading the multilayer graph and alias tables
    from the pickles produced by earlier pipeline stages.

    Large-graph variant: each iteration runs sequentially in this process.
    NOTE(review): the ProcessPoolExecutor context is opened but never
    submitted to, so ``workers`` currently has no effect here — confirm
    whether parallel submission was intended.
    """
    logging.info('Loading distances_nets from disk...')
    graphs = restoreVariableFromDisk('distances_nets_graphs')
    alias_method_j = restoreVariableFromDisk('nets_weights_alias_method_j')
    alias_method_q = restoreVariableFromDisk('nets_weights_alias_method_q')
    amount_neighbours = restoreVariableFromDisk('amount_neighbours')
    logging.info('Creating RWs...')
    t0 = time()
    walks = deque()
    initialLayer = 0
    parts = workers
    with ProcessPoolExecutor(max_workers=workers) as executor:
        for walk_iter in range(num_walks):
            # Shuffle so consecutive passes start from nodes in fresh order.
            random.shuffle(vertices)
            logging.info("Execution iteration {} ...".format(walk_iter))
            walk = exec_ramdom_walks_for_chunck(vertices, graphs, alias_method_j, alias_method_q, walk_length,
                                                amount_neighbours)
            walks.extend(walk)
            logging.info("Iteration {} executed.".format(walk_iter))
    t1 = time()
    logging.info('RWs created. Time : {}m'.format((t1 - t0) / 60))
    logging.info("Saving Random Walks on disk...")
    save_random_walks(walks)
def generate_random_walks(num_walks, walk_length, workers, vertices):
    """Generate ``num_walks`` walks per vertex in parallel and save them to
    disk ('random_walks.txt').

    Each of the ``num_walks`` iterations (one full pass over shuffled
    ``vertices``) is submitted as a job to a process pool; results are
    collected as they complete, so walk order across iterations is not
    deterministic.
    """
    logging.info('Loading distances_nets on disk...')
    graphs = restoreVariableFromDisk('distances_nets_graphs')
    alias_method_j = restoreVariableFromDisk('nets_weights_alias_method_j')
    alias_method_q = restoreVariableFromDisk('nets_weights_alias_method_q')
    amount_neighbours = restoreVariableFromDisk('amount_neighbours')
    logging.info('Creating RWs...')
    t0 = time()
    walks = deque()
    initialLayer = 0
    # No point in spawning more workers than there are iterations.
    if (workers > num_walks):
        workers = num_walks
    with ProcessPoolExecutor(max_workers=workers) as executor:
        futures = {}
        for walk_iter in range(num_walks):
            random.shuffle(vertices)
            job = executor.submit(exec_ramdom_walks_for_chunck, vertices, graphs, alias_method_j, alias_method_q,
                                  walk_length, amount_neighbours)
            futures[job] = walk_iter
            # part += 1
        logging.info("Receiving results...")
        for job in as_completed(futures):
            walk = job.result()
            r = futures[job]
            logging.info("Iteration {} executed.".format(r))
            walks.extend(walk)
            del futures[job]
    t1 = time()
    logging.info('RWs created. Time: {}m'.format((t1 - t0) / 60))
    logging.info("Saving Random Walks on disk...")
    save_random_walks(walks)
def save_random_walks(walks):
    """Write every walk to 'random_walks.txt', one line per walk with each
    vertex followed by a space (including before the newline)."""
    with open('random_walks.txt', 'w') as file:
        for walk in walks:
            # Each vertex gets a trailing space, matching the original
            # output format byte-for-byte.
            file.write(''.join('{} '.format(v) for v in walk) + '\n')
    return
def prob_moveup(amount_neighbours):
    """Probability of moving one layer up in the multilayer graph.

    Grows with the number of 'similar enough' neighbours on the current
    layer, saturating below 1; equals 0.5 when amount_neighbours == 0.
    """
    x = math.log(amount_neighbours + math.e)
    return x / (x + 1)
def alias_draw(J, q):
    '''
    Draw a sample from a non-uniform discrete distribution via the alias
    method: choose a column uniformly at random, then accept it with
    probability q[column], otherwise return its alias J[column].
    '''
    num_cols = len(J)
    column = int(np.floor(np.random.rand() * num_cols))
    if np.random.rand() < q[column]:
        return column
    return J[column]
| 6,624 | 30.103286 | 115 | py |
BioNEV | BioNEV-master/src/bionev/struc2vec/algorithms_distances.py | # -*- coding: utf-8 -*-
import math
import os
from collections import deque
from concurrent.futures import ProcessPoolExecutor, as_completed
import numpy as np
from fastdtw import fastdtw
from bionev.struc2vec.utils import *
limiteDist = 20
def getDegreeListsVertices(g, vertices, calcUntilLayer):
    """Compute the per-BFS-ring ordered degree lists for each vertex in
    ``vertices`` (see ``getDegreeLists``); returns {vertex: degree lists}."""
    return {v: getDegreeLists(g, v, calcUntilLayer) for v in vertices}
def getCompactDegreeListsVertices(g, vertices, maxDegree, calcUntilLayer):
    """Compute the per-BFS-ring compact degree histograms for each vertex in
    ``vertices`` (see ``getCompactDegreeLists``); returns {vertex: lists}."""
    return {v: getCompactDegreeLists(g, v, maxDegree, calcUntilLayer) for v in vertices}
def getCompactDegreeLists(g, root, maxDegree, calcUntilLayer):
    """BFS from ``root`` collecting, per depth (ring), a compact degree
    histogram: an array of (degree, frequency) rows sorted by degree.

    Stops after ``calcUntilLayer`` rings. ``maxDegree`` is accepted for API
    symmetry but not used here.
    Returns {depth: np.ndarray of (degree, frequency) rows, dtype int32}.
    """
    t0 = time()
    listas = {}
    vetor_marcacao = [0] * (max(g) + 1)
    # Mark the root and enqueue it.
    queue = deque()
    queue.append(root)
    vetor_marcacao[root] = 1
    l = {}
    # BFS-depth bookkeeping: timeToDepthIncrease counts vertices left in the
    # current ring; pendingDepthIncrease counts vertices queued for the next.
    depth = 0
    pendingDepthIncrease = 0
    timeToDepthIncrease = 1
    while queue:
        vertex = queue.popleft()
        timeToDepthIncrease -= 1
        d = len(g[vertex])
        if (d not in l):
            l[d] = 0
        l[d] += 1
        for v in g[vertex]:
            if (vetor_marcacao[v] == 0):
                vetor_marcacao[v] = 1
                queue.append(v)
                pendingDepthIncrease += 1
        if (timeToDepthIncrease == 0):
            # Current ring finished: freeze its sorted histogram.
            list_d = []
            for degree, freq in l.items():
                list_d.append((degree, freq))
            list_d.sort(key=lambda x: x[0])
            listas[depth] = np.array(list_d, dtype=np.int32)
            l = {}
            if (calcUntilLayer == depth):
                break
            depth += 1
            timeToDepthIncrease = pendingDepthIncrease
            pendingDepthIncrease = 0
    t1 = time()
    logging.info('BFS vertex {}. Time: {}s'.format(root, (t1 - t0)))
    return listas
def getDegreeLists(g, root, calcUntilLayer):
    """BFS from ``root`` collecting, per depth (ring), the sorted list of
    degrees of the vertices in that ring.

    Stops after ``calcUntilLayer`` rings.
    Returns {depth: sorted np.ndarray of degrees, dtype float}.
    """
    t0 = time()
    listas = {}
    vetor_marcacao = [0] * (max(g) + 1)
    # Mark the root and enqueue it.
    queue = deque()
    queue.append(root)
    vetor_marcacao[root] = 1
    l = deque()
    # BFS-depth bookkeeping: timeToDepthIncrease counts vertices left in the
    # current ring; pendingDepthIncrease counts vertices queued for the next.
    depth = 0
    pendingDepthIncrease = 0
    timeToDepthIncrease = 1
    while queue:
        vertex = queue.popleft()
        timeToDepthIncrease -= 1
        l.append(len(g[vertex]))
        for v in g[vertex]:
            if (vetor_marcacao[v] == 0):
                vetor_marcacao[v] = 1
                queue.append(v)
                pendingDepthIncrease += 1
        if (timeToDepthIncrease == 0):
            # Current ring finished: freeze its sorted degree sequence.
            lp = np.array(l, dtype='float')
            lp = np.sort(lp)
            listas[depth] = lp
            l = deque()
            if (calcUntilLayer == depth):
                break
            depth += 1
            timeToDepthIncrease = pendingDepthIncrease
            pendingDepthIncrease = 0
    t1 = time()
    logging.info('BFS vertex {}. Time: {}s'.format(root, (t1 - t0)))
    return listas
def ct(a, b):
    """Structural distance between two degrees: ratio of the larger to the
    smaller (each shifted by 0.5 so zero degrees are well defined) minus 1.
    Symmetric; equals 0 when a == b."""
    eps = 0.5
    hi = max(a, b) + eps
    lo = min(a, b) + eps
    return hi / lo - 1
def ct_min(a, b):
    """Distance between two (degree, frequency) pairs: the degree-ratio cost
    (as in ``ct``) scaled by the smaller of the two frequencies."""
    eps = 0.5
    hi = max(a[0], b[0]) + eps
    lo = min(a[0], b[0]) + eps
    return (hi / lo - 1) * min(a[1], b[1])
def ct_max(a, b):
    """Distance between two (degree, frequency) pairs: the degree-ratio cost
    (as in ``ct``) scaled by the larger of the two frequencies."""
    eps = 0.5
    hi = max(a[0], b[0]) + eps
    lo = min(a[0], b[0]) + eps
    return (hi / lo - 1) * max(a[1], b[1])
def preprocess_degreeLists():
    """Convert the pickled 'degreeList' (raw degree sequences per vertex and
    layer) into the compact (degree, frequency) representation and pickle
    it as 'compactDegreeList'."""
    logging.info("Recovering degreeList from disk...")
    degreeList = restoreVariableFromDisk('degreeList')
    logging.info("Creating compactDegreeList...")
    dList = {}
    dFrequency = {}
    # First pass: count degree frequencies per vertex and layer.
    for v, layers in degreeList.items():
        dFrequency[v] = {}
        for layer, degreeListLayer in layers.items():
            dFrequency[v][layer] = {}
            for degree in degreeListLayer:
                if (degree not in dFrequency[v][layer]):
                    dFrequency[v][layer][degree] = 0
                dFrequency[v][layer][degree] += 1
    # Second pass: materialize sorted (degree, frequency) arrays.
    for v, layers in dFrequency.items():
        dList[v] = {}
        for layer, frequencyList in layers.items():
            list_d = []
            for degree, freq in frequencyList.items():
                list_d.append((degree, freq))
            list_d.sort(key=lambda x: x[0])
            dList[v][layer] = np.array(list_d, dtype='float')
    logging.info("compactDegreeList created!")
    saveVariableOnDisk(dList, 'compactDegreeList')
def verifyDegrees(degrees, degree_v_root, degree_a, degree_b):
    """Choose which neighbouring degree value (above or below the root's
    degree) to visit next: the one closer to ``degree_v_root``.

    A value of -1 means that direction is exhausted. On a tie the 'after'
    direction (``degree_a``) wins. ``degrees`` is unused but kept for
    signature compatibility with callers.
    """
    if degree_b == -1:
        return degree_a
    if degree_a == -1:
        return degree_b
    if abs(degree_b - degree_v_root) < abs(degree_a - degree_v_root):
        return degree_b
    return degree_a
def get_vertices(v, degree_v, degrees, a_vertices):
    """Select ~2*log2(a_vertices) candidate vertices with degree closest to
    ``degree_v``, for pairwise structural-distance computation.

    ``degrees`` maps degree -> {'vertices': ..., 'before': d, 'after': d},
    a doubly linked structure over the existing degree values; the search
    starts at ``degree_v`` and expands outward, always choosing the nearer
    remaining degree. StopIteration is (ab)used as a multi-level break once
    enough vertices were collected or both directions are exhausted.
    """
    a_vertices_selected = 2 * math.log(a_vertices, 2)
    # logging.info("Selecting {} vertices close to vertex {} ...".format(int(a_vertices_selected),v))
    vertices = deque()
    try:
        # First take the vertices with exactly the same degree (minus v itself).
        c_v = 0
        for v2 in degrees[degree_v]['vertices']:
            if (v != v2):
                vertices.append(v2)
                c_v += 1
                if (c_v > a_vertices_selected):
                    raise StopIteration
        if ('before' not in degrees[degree_v]):
            degree_b = -1
        else:
            degree_b = degrees[degree_v]['before']
        if ('after' not in degrees[degree_v]):
            degree_a = -1
        else:
            degree_a = degrees[degree_v]['after']
        if (degree_b == -1 and degree_a == -1):
            raise StopIteration
        degree_now = verifyDegrees(degrees, degree_v, degree_a, degree_b)
        # Expand outward, alternating toward whichever degree is closer.
        while True:
            for v2 in degrees[degree_now]['vertices']:
                if (v != v2):
                    vertices.append(v2)
                    c_v += 1
                    if (c_v > a_vertices_selected):
                        raise StopIteration
            if (degree_now == degree_b):
                if ('before' not in degrees[degree_b]):
                    degree_b = -1
                else:
                    degree_b = degrees[degree_b]['before']
            else:
                if ('after' not in degrees[degree_a]):
                    degree_a = -1
                else:
                    degree_a = degrees[degree_a]['after']
            if (degree_b == -1 and degree_a == -1):
                raise StopIteration
            degree_now = verifyDegrees(degrees, degree_v, degree_a, degree_b)
    except StopIteration:
        # Enough candidates collected (or no degrees left to explore).
        return list(vertices)
    return list(vertices)
def splitDegreeList(part, c, G, compactDegree):
    """For chunk ``c`` of vertices, select each vertex's candidate
    neighbours (via ``get_vertices``) and pickle both the candidate lists
    ('split-vertices-<part>') and the degree lists needed to compare them
    ('split-degreeList-<part>').

    ``compactDegree`` selects which degree-list representation to load.
    """
    if (compactDegree):
        logging.info("Recovering compactDegreeList from disk...")
        degreeList = restoreVariableFromDisk('compactDegreeList')
    else:
        logging.info("Recovering degreeList from disk...")
        degreeList = restoreVariableFromDisk('degreeList')
    logging.info("Recovering degree vector from disk...")
    degrees = restoreVariableFromDisk('degrees_vector')
    degreeListsSelected = {}
    vertices = {}
    a_vertices = len(G)
    for v in c:
        nbs = get_vertices(v, len(G[v]), degrees, a_vertices)
        vertices[v] = nbs
        # Keep only the degree lists this chunk will actually compare.
        degreeListsSelected[v] = degreeList[v]
        for n in nbs:
            degreeListsSelected[n] = degreeList[n]
    saveVariableOnDisk(vertices, 'split-vertices-' + str(part))
    saveVariableOnDisk(degreeListsSelected, 'split-degreeList-' + str(part))
def calc_distances(part, compactDegree=False):
    """Compute DTW-based structural distances for the vertex pairs prepared
    by ``splitDegreeList`` for chunk ``part``.

    For each pair and each layer common to both vertices, runs fastdtw over
    their per-ring degree lists with cost ``ct_max`` (compact
    (degree, frequency) representation) or ``ct`` (raw degree lists).
    Distances are consolidated into cumulative per-layer sums and pickled
    as 'distances-<part>'.
    """
    vertices = restoreVariableFromDisk('split-vertices-' + str(part))
    degreeList = restoreVariableFromDisk('split-degreeList-' + str(part))
    distances = {}
    if compactDegree:
        dist_func = ct_max
    else:
        dist_func = ct
    for v1, nbs in vertices.items():
        lists_v1 = degreeList[v1]
        for v2 in nbs:
            t00 = time()
            lists_v2 = degreeList[v2]
            # Only layers present for both vertices can be compared.
            max_layer = min(len(lists_v1), len(lists_v2))
            distances[v1, v2] = {}
            for layer in range(0, max_layer):
                dist, path = fastdtw(lists_v1[layer], lists_v2[layer], radius=1, dist=dist_func)
                distances[v1, v2][layer] = dist
            t11 = time()
            logging.info('fastDTW between vertices ({}, {}). Time: {}s'.format(v1, v2, (t11 - t00)))
    preprocess_consolides_distances(distances)
    saveVariableOnDisk(distances, 'distances-' + str(part))
    return
def calc_distances_all(vertices, list_vertices, degreeList, part, compactDegree=False):
    """Compute DTW-based structural distances for explicitly given pairs.

    ``vertices[i]`` is compared against every vertex in ``list_vertices[i]``
    (the two sequences are walked in lockstep via ``cont``). Cost function
    and consolidation are the same as in ``calc_distances``; the result is
    pickled as 'distances-<part>'.
    """
    distances = {}
    cont = 0
    if compactDegree:
        dist_func = ct_max
    else:
        dist_func = ct
    for v1 in vertices:
        lists_v1 = degreeList[v1]
        for v2 in list_vertices[cont]:
            lists_v2 = degreeList[v2]
            # Only layers present for both vertices can be compared.
            max_layer = min(len(lists_v1), len(lists_v2))
            distances[v1, v2] = {}
            for layer in range(0, max_layer):
                # t0 = time()
                dist, path = fastdtw(lists_v1[layer], lists_v2[layer], radius=1, dist=dist_func)
                # t1 = time()
                # logging.info('D ({} , {}), layer {} fastDTW time: {}s . Distance: {}'.format(v1,v2,layer,(t1-t0),dist))
                distances[v1, v2][layer] = dist
        cont += 1
    preprocess_consolides_distances(distances)
    saveVariableOnDisk(distances, 'distances-' + str(part))
    return
def selectVertices(layer, fractionCalcDists):
    """Return the vertex pairs whose consolidated distance at ``layer - 1``
    is at most the threshold computed by ``calcThresholdDistance`` — i.e.
    the pairs worth extending to the next layer."""
    previousLayer = layer - 1
    logging.info("Recovering distances from disk...")
    distances = restoreVariableFromDisk('distances')
    threshold = calcThresholdDistance(previousLayer, distances, fractionCalcDists)
    logging.info('Selecting vertices...')
    vertices_selected = deque()
    for vertices, layers in distances.items():
        # Pairs that never reached the previous layer cannot be extended.
        if (previousLayer not in layers):
            continue
        if (layers[previousLayer] <= threshold):
            vertices_selected.append(vertices)
    distances = {}
    logging.info('Vertices selected.')
    return vertices_selected
def preprocess_consolides_distances(distances, startLayer=1):
    """Convert per-layer distances into cumulative sums, in place.

    For every vertex pair, each layer's distance (from `startLayer` on) has the
    previous layer's value added to it, so layer k ends up holding the sum of
    layers 0..k. Note: `startLayer` is clamped per entry and the clamped value
    carries over to subsequent entries, matching the original behavior.
    """
    logging.info('Consolidating distances...')
    for pair, layer_dists in distances.items():
        ordered_layers = sorted(layer_dists.keys())
        startLayer = min(len(ordered_layers), startLayer)
        for layer in ordered_layers[startLayer:]:
            layer_dists[layer] += layer_dists[layer - 1]
    logging.info('Distances consolidated.')
def exec_bfs_compact(G, workers, calcUntilLayer):
    """Compute per-vertex BFS degree sequences (compact/bucketed variant) in
    parallel and persist the merged result as 'compactDegreeList'.

    Args:
        G: adjacency dict {vertex: [neighbors]}.
        workers: number of worker processes (also the number of chunks).
        calcUntilLayer: deepest BFS layer to compute.
    """
    futures = {}
    degreeList = {}
    t0 = time()
    vertices = list(G.keys())
    parts = workers
    chunks = partition(vertices, parts)
    # The compact representation needs the global maximum degree up front.
    logging.info('Capturing larger degree...')
    maxDegree = 0
    for v in vertices:
        if (len(G[v]) > maxDegree):
            maxDegree = len(G[v])
    logging.info('Larger degree captured')
    with ProcessPoolExecutor(max_workers=workers) as executor:
        part = 1
        for c in chunks:
            job = executor.submit(getCompactDegreeListsVertices, G, c, maxDegree, calcUntilLayer)
            futures[job] = part
            part += 1
        # Merge each worker's partial degree lists as they finish.
        for job in as_completed(futures):
            dl = job.result()
            v = futures[job]  # part index; kept only for bookkeeping
            degreeList.update(dl)
    logging.info("Saving degreeList on disk...")
    saveVariableOnDisk(degreeList, 'compactDegreeList')
    t1 = time()
    logging.info('Execution time - BFS: {}m'.format((t1 - t0) / 60))
    return
def exec_bfs(G, workers, calcUntilLayer):
    """Compute per-vertex BFS degree sequences in parallel and persist the
    merged result as 'degreeList'. Plain (non-compact) counterpart of
    exec_bfs_compact.

    Args:
        G: adjacency dict {vertex: [neighbors]}.
        workers: number of worker processes (also the number of chunks).
        calcUntilLayer: deepest BFS layer to compute.
    """
    futures = {}
    degreeList = {}
    t0 = time()
    vertices = list(G.keys())
    parts = workers
    chunks = partition(vertices, parts)
    with ProcessPoolExecutor(max_workers=workers) as executor:
        part = 1
        for c in chunks:
            job = executor.submit(getDegreeListsVertices, G, c, calcUntilLayer)
            futures[job] = part
            part += 1
        # Merge each worker's partial degree lists as they finish.
        for job in as_completed(futures):
            dl = job.result()
            v = futures[job]  # part index; kept only for bookkeeping
            degreeList.update(dl)
    logging.info("Saving degreeList on disk...")
    saveVariableOnDisk(degreeList, 'degreeList')
    t1 = time()
    logging.info('Execution time - BFS: {}m'.format((t1 - t0) / 60))
    return
def generate_distances_network_part1(workers):
    """Regroup the per-worker distance pickles by layer.

    Reads 'distances-<part>' for every worker part and writes, for each layer,
    a dict {(vx, vy): distance} to 'weights_distances-layer-<layer>'.
    """
    parts = workers
    weights_distances = {}
    for part in range(1, parts + 1):
        logging.info('Executing part {}...'.format(part))
        distances = restoreVariableFromDisk('distances-' + str(part))
        for vertices, layers in distances.items():
            for layer, distance in layers.items():
                vx = vertices[0]
                vy = vertices[1]
                if (layer not in weights_distances):
                    weights_distances[layer] = {}
                weights_distances[layer][vx, vy] = distance
        logging.info('Part {} executed.'.format(part))
    for layer, values in weights_distances.items():
        saveVariableOnDisk(values, 'weights_distances-layer-' + str(layer))
    return
def generate_distances_network_part2(workers):
    """Build a per-layer undirected adjacency list connecting every vertex pair
    that has a computed structural distance.

    Reads 'distances-<part>' for every worker part and writes, for each layer,
    a dict {vertex: [neighbors]} to 'graphs-layer-<layer>'.
    """
    parts = workers
    graphs = {}
    for part in range(1, parts + 1):
        logging.info('Executing part {}...'.format(part))
        distances = restoreVariableFromDisk('distances-' + str(part))
        for vertices, layers in distances.items():
            for layer, distance in layers.items():
                vx = vertices[0]
                vy = vertices[1]
                if (layer not in graphs):
                    graphs[layer] = {}
                if (vx not in graphs[layer]):
                    graphs[layer][vx] = []
                if (vy not in graphs[layer]):
                    graphs[layer][vy] = []
                # Edges are symmetric: record both directions.
                graphs[layer][vx].append(vy)
                graphs[layer][vy].append(vx)
        logging.info('Part {} executed.'.format(part))
    for layer, values in graphs.items():
        saveVariableOnDisk(values, 'graphs-layer-' + str(layer))
    return
def generate_distances_network_part3():
    """Turn per-layer distances into normalized edge weights w = exp(-d) and
    precompute alias-sampling tables per vertex.

    For each layer found on disk, writes three pickles:
    'distances_nets_weights-layer-<layer>', 'alias_method_j-layer-<layer>',
    and 'alias_method_q-layer-<layer>'.
    """
    layer = 0
    while (isPickle('graphs-layer-' + str(layer))):
        graphs = restoreVariableFromDisk('graphs-layer-' + str(layer))
        weights_distances = restoreVariableFromDisk('weights_distances-layer-' + str(layer))
        logging.info('Executing layer {}...'.format(layer))
        alias_method_j = {}
        alias_method_q = {}
        weights = {}
        for v, neighbors in graphs.items():
            e_list = deque()
            sum_w = 0.0
            for n in neighbors:
                # Each pair's distance is stored under one orientation only.
                if (v, n) in weights_distances:
                    wd = weights_distances[v, n]
                else:
                    wd = weights_distances[n, v]
                # Smaller structural distance => larger edge weight.
                w = np.exp(-float(wd))
                e_list.append(w)
                sum_w += w
            # Normalize to a probability distribution over v's neighbors.
            e_list = [x / sum_w for x in e_list]
            weights[v] = e_list
            J, q = alias_setup(e_list)
            alias_method_j[v] = J
            alias_method_q[v] = q
        saveVariableOnDisk(weights, 'distances_nets_weights-layer-' + str(layer))
        saveVariableOnDisk(alias_method_j, 'alias_method_j-layer-' + str(layer))
        saveVariableOnDisk(alias_method_q, 'alias_method_q-layer-' + str(layer))
        logging.info('Layer {} executed.'.format(layer))
        layer += 1
    logging.info('Weights created.')
    return
def generate_distances_network_part4():
    """Merge every per-layer similarity graph into one dict keyed by layer and
    persist it as 'distances_nets_graphs'."""
    logging.info('Consolidating graphs...')
    consolidated = {}
    current_layer = 0
    while isPickle('graphs-layer-' + str(current_layer)):
        logging.info('Executing layer {}...'.format(current_layer))
        consolidated[current_layer] = restoreVariableFromDisk('graphs-layer-' + str(current_layer))
        logging.info('Layer {} executed.'.format(current_layer))
        current_layer += 1
    logging.info("Saving distancesNets on disk...")
    saveVariableOnDisk(consolidated, 'distances_nets_graphs')
    logging.info('Graphs consolidated.')
    return
def generate_distances_network_part5():
    """Gather the per-layer alias-method J tables into a single dict keyed by
    layer and persist it as 'nets_weights_alias_method_j'."""
    merged_j = {}
    current_layer = 0
    while isPickle('alias_method_j-layer-' + str(current_layer)):
        logging.info('Executing layer {}...'.format(current_layer))
        merged_j[current_layer] = restoreVariableFromDisk('alias_method_j-layer-' + str(current_layer))
        logging.info('Layer {} executed.'.format(current_layer))
        current_layer += 1
    logging.info("Saving nets_weights_alias_method_j on disk...")
    saveVariableOnDisk(merged_j, 'nets_weights_alias_method_j')
    return
def generate_distances_network_part6():
    """Gather the per-layer alias-method q tables into a single dict keyed by
    layer and persist it as 'nets_weights_alias_method_q'."""
    merged_q = {}
    current_layer = 0
    while isPickle('alias_method_q-layer-' + str(current_layer)):
        logging.info('Executing layer {}...'.format(current_layer))
        merged_q[current_layer] = restoreVariableFromDisk('alias_method_q-layer-' + str(current_layer))
        logging.info('Layer {} executed.'.format(current_layer))
        current_layer += 1
    logging.info("Saving nets_weights_alias_method_q on disk...")
    saveVariableOnDisk(merged_q, 'nets_weights_alias_method_q')
    return
def generate_distances_network(workers):
    """Run the six-stage pipeline that turns raw pair distances into the layered,
    weighted similarity network with alias-sampling tables.

    Each stage is executed in a single child process; stale pickles from a
    previous run are removed via `rm` before the stage that regenerates them.
    NOTE(review): the shell `rm` calls build the command by string
    concatenation; paths come from returnPathStruc2vec(), not user input.
    """
    t0 = time()
    logging.info('Creating distance network...')
    os.system("rm " + returnPathStruc2vec() + "/pickles/weights_distances-layer-*.pickle")
    with ProcessPoolExecutor(max_workers=1) as executor:
        job = executor.submit(generate_distances_network_part1, workers)
        job.result()
    t1 = time()
    t = t1 - t0
    logging.info('- Time - part 1: {}s'.format(t))
    t0 = time()
    os.system("rm " + returnPathStruc2vec() + "/pickles/graphs-layer-*.pickle")
    with ProcessPoolExecutor(max_workers=1) as executor:
        job = executor.submit(generate_distances_network_part2, workers)
        job.result()
    t1 = time()
    t = t1 - t0
    logging.info('- Time - part 2: {}s'.format(t))
    logging.info('distance network created.')
    logging.info('Transforming distances into weights...')
    t0 = time()
    os.system("rm " + returnPathStruc2vec() + "/pickles/distances_nets_weights-layer-*.pickle")
    os.system("rm " + returnPathStruc2vec() + "/pickles/alias_method_j-layer-*.pickle")
    os.system("rm " + returnPathStruc2vec() + "/pickles/alias_method_q-layer-*.pickle")
    with ProcessPoolExecutor(max_workers=1) as executor:
        job = executor.submit(generate_distances_network_part3)
        job.result()
    t1 = time()
    t = t1 - t0
    logging.info('- Time - part 3: {}s'.format(t))
    t0 = time()
    with ProcessPoolExecutor(max_workers=1) as executor:
        job = executor.submit(generate_distances_network_part4)
        job.result()
    t1 = time()
    t = t1 - t0
    logging.info('- Time - part 4: {}s'.format(t))
    t0 = time()
    with ProcessPoolExecutor(max_workers=1) as executor:
        job = executor.submit(generate_distances_network_part5)
        job.result()
    t1 = time()
    t = t1 - t0
    logging.info('- Time - part 5: {}s'.format(t))
    t0 = time()
    with ProcessPoolExecutor(max_workers=1) as executor:
        job = executor.submit(generate_distances_network_part6)
        job.result()
    t1 = time()
    t = t1 - t0
    logging.info('- Time - part 6: {}s'.format(t))
    return
def alias_setup(probs):
    '''
    Compute utility lists for non-uniform sampling from discrete distributions.
    Refer to https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
    for details

    Args:
        probs: sequence of probabilities (expected to sum to 1).

    Returns:
        (J, q): the alias table and the acceptance-probability table, both
        NumPy arrays of length len(probs), to be used by the usual alias-draw
        procedure.
    '''
    K = len(probs)
    q = np.zeros(K)
    # Bug fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` yields the same default integer dtype.
    J = np.zeros(K, dtype=int)
    # Scale each probability by K and split indices into those that
    # under-fill (q < 1) and over-fill (q >= 1) a unit bucket.
    smaller = []
    larger = []
    for kk, prob in enumerate(probs):
        q[kk] = K * prob
        if q[kk] < 1.0:
            smaller.append(kk)
        else:
            larger.append(kk)
    # Repeatedly pair an under-full bucket with an over-full one, letting the
    # over-full entry top up the under-full one via its alias J[small].
    while len(smaller) > 0 and len(larger) > 0:
        small = smaller.pop()
        large = larger.pop()
        J[small] = large
        q[large] = q[large] + q[small] - 1.0
        if q[large] < 1.0:
            smaller.append(large)
        else:
            larger.append(large)
    return J, q
| 20,177 | 28.074928 | 128 | py |
BioNEV | BioNEV-master/src/bionev/struc2vec/graph.py | # -*- coding: utf-8 -*-
"""Graph utilities."""
import logging
from collections import defaultdict
from collections.abc import Iterable  # moved out of `collections` in Python 3.10
from concurrent.futures import ProcessPoolExecutor
from io import open
from itertools import permutations
from multiprocessing import cpu_count
from time import time

from six import iterkeys
from six.moves import range, zip_longest
class Graph(defaultdict):
    """Efficient basic implementation of an nx-style `Graph`: undirected graphs
    with self loops, stored as a defaultdict mapping vertex -> neighbor list.

    Because this subclasses defaultdict(list), reading a missing vertex with
    `G[v]` silently creates an empty adjacency entry.
    """

    def __init__(self):
        super(Graph, self).__init__(list)

    def nodes(self):
        """Return a view of all vertices."""
        return self.keys()

    def adjacency_iter(self):
        """Iterate over (vertex, neighbor-list) pairs."""
        return self.items()

    def subgraph(self, nodes={}):
        """Return a new Graph restricted to `nodes` (edges outside are dropped).

        NOTE: `nodes={}` is a mutable default, but it is never mutated here.
        """
        subgraph = Graph()
        for n in nodes:
            if n in self:
                subgraph[n] = [x for x in self[n] if x in nodes]
        return subgraph

    def make_undirected(self):
        """Mirror every directed edge, then sort/dedupe adjacency lists.

        NOTE(review): `self[other]` can insert new keys while iterating
        self.keys(), which raises RuntimeError on Python 3 if `other` is not
        already a vertex — callers appear to rely on all endpoints being
        present; confirm before reuse.
        """
        t0 = time()
        for v in self.keys():
            for other in self[v]:
                if v != other:
                    self[other].append(v)
        t1 = time()
        # logger.info('make_directed: added missing edges {}s'.format(t1-t0))
        self.make_consistent()
        return self

    def make_consistent(self):
        """Sort each adjacency list and remove duplicate neighbors."""
        t0 = time()
        for k in iterkeys(self):
            self[k] = list(sorted(set(self[k])))
        t1 = time()
        # logger.info('make_consistent: made consistent in {}s'.format(t1-t0))
        # self.remove_self_loops()
        return self

    def remove_self_loops(self):
        """Delete every self loop v -> v."""
        removed = 0
        t0 = time()
        for x in self:
            if x in self[x]:
                self[x].remove(x)
                removed += 1
        t1 = time()
        # logger.info('remove_self_loops: removed {} loops in {}s'.format(removed, (t1-t0)))
        return self

    def check_self_loops(self):
        """Return True if any vertex has a self loop."""
        for x in self:
            for y in self[x]:
                if x == y:
                    return True
        return False

    def has_edge(self, v1, v2):
        """Return True if an edge exists between v1 and v2 in either direction."""
        if v2 in self[v1] or v1 in self[v2]:
            return True
        return False

    def degree(self, nodes=None):
        """Return the degree of a single vertex, or a {vertex: degree} dict
        when `nodes` is an iterable of vertices."""
        if isinstance(nodes, Iterable):
            return {v: len(self[v]) for v in nodes}
        else:
            return len(self[nodes])

    def order(self):
        "Returns the number of nodes in the graph"
        return len(self)

    def number_of_edges(self):
        "Returns the number of edges in the graph"
        return sum([self.degree(x) for x in self.keys()]) / 2

    def number_of_nodes(self):
        "Returns the number of nodes in the graph"
        return self.order()

    def gToDict(self):
        """Return a plain dict copy of the adjacency structure."""
        d = {}
        for k, v in self.items():
            d[k] = v
        return d

    def printAdjList(self):
        """Print each vertex with its neighbor list (debug helper)."""
        for key, value in self.items():
            print(key, ":", value)
def clique(size):
    """Build a complete graph on vertices 1..size.

    Every permutation of the vertex set is fed to from_adjlist as an adjacency
    row, so each vertex ends up adjacent to all others.
    """
    vertex_range = range(1, size + 1)
    return from_adjlist(permutations(vertex_range))
# http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks-in-python
def grouper(n, iterable, padvalue=None):
    """Yield successive n-tuples from `iterable`, padding the last with `padvalue`.

    Example: grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')
    """
    # n references to the SAME iterator advance in lockstep, producing chunks.
    chunk_iterators = [iter(iterable)] * n
    return zip_longest(*chunk_iterators, fillvalue=padvalue)
def parse_adjacencylist(f):
    """Parse adjacency-list lines into int rows with deduplicated, sorted neighbors.

    Falsy lines (e.g. '' or the None padding produced by grouper) and lines
    starting with '#' are skipped. Each kept row is [node, n1, n2, ...].
    """
    adjlist = []
    for l in f:
        if l and l[0] != "#":
            introw = [int(x) for x in l.strip().split()]
            row = [introw[0]]
            # Bug fix: the original `set(sorted(...))` discarded the sort order
            # (sets are unordered); `sorted(set(...))` dedupes THEN sorts.
            row.extend(sorted(set(introw[1:])))
            adjlist.extend([row])
    return adjlist
def parse_adjacencylist_unchecked(f):
    """Parse whitespace-separated adjacency rows into int lists, with no
    deduplication or sorting. Falsy lines and '#' comment lines are skipped."""
    return [
        [int(token) for token in line.strip().split()]
        for line in f
        if line and line[0] != "#"
    ]
def load_adjacencylist(file_, undirected=False, chunksize=10000, unchecked=True):
    """Load a Graph from an adjacency-list file, parsing chunks in parallel.

    Args:
        file_: path to the adjacency-list text file.
        undirected: if True, mirror all edges after loading.
        chunksize: number of lines handed to each parser task.
        unchecked: if True, skip neighbor dedup/sort during parsing (the
            conversion step then keeps rows as-is).

    Returns:
        the populated Graph.
    """
    if unchecked:
        parse_func = parse_adjacencylist_unchecked
        convert_func = from_adjlist_unchecked
    else:
        parse_func = parse_adjacencylist
        convert_func = from_adjlist
    adjlist = []
    t0 = time()
    with open(file_) as f:
        with ProcessPoolExecutor(max_workers=cpu_count()) as executor:
            total = 0
            # grouper pads the final chunk with None; the parsers skip those.
            for idx, adj_chunk in enumerate(executor.map(parse_func, grouper(int(chunksize), f))):
                adjlist.extend(adj_chunk)
                total += len(adj_chunk)
    t1 = time()
    logging.info('Parsed {} edges with {} chunks in {}s'.format(total, idx, t1 - t0))
    t0 = time()
    G = convert_func(adjlist)
    t1 = time()
    logging.info('Converted edges to graph in {}s'.format(t1 - t0))
    if undirected:
        t0 = time()
        G = G.make_undirected()
        t1 = time()
        logging.info('Made graph undirected in {}s'.format(t1 - t0))
    return G
def load_edgelist(file_, undirected=True):
    """Load a Graph from an edge-list file (one 'src dst' pair per line).

    Lines with a single token register the vertex with an EMPTY adjacency list
    (note: this overwrites any previously accumulated neighbors for it).
    """
    G = Graph()
    with open(file_) as f:
        for l in f:
            if (len(l.strip().split()[:2]) > 1):
                x, y = l.strip().split()[:2]
                x = int(x)
                y = int(y)
                G[x].append(y)
                if undirected:
                    G[y].append(x)
            else:
                # Isolated vertex: a single token on the line.
                x = l.strip().split()[:2]
                x = int(x[0])
                G[x] = []
    G.make_consistent()
    return G
# def load_matfile(file_, variable_name="network", undirected=True):
# mat_varables = loadmat(file_)
# mat_matrix = mat_varables[variable_name]
#
# return from_numpy(mat_matrix, undirected)
def from_networkx(G_input, undirected=True):
    """Convert a networkx graph into this module's Graph.

    Bug fix: `nodes_iter()` was removed in networkx 2.0; fall back to
    `nodes()` when it is absent so both major networkx versions work.
    """
    G = Graph()
    node_iter = G_input.nodes_iter() if hasattr(G_input, "nodes_iter") else G_input.nodes()
    for idx, x in enumerate(node_iter):
        for y in iterkeys(G_input[x]):
            G[x].append(y)
    if undirected:
        G.make_undirected()
    return G
# def from_numpy(x, undirected=True):
# G = Graph()
#
# if issparse(x):
# cx = x.tocoo()
# for i,j,v in zip(cx.row, cx.col, cx.data):
# G[i].append(j)
# else:
# raise Exception("Dense matrices not yet supported.")
#
# if undirected:
# G.make_undirected()
#
# G.make_consistent()
# return G
def from_adjlist(adjlist):
    """Build a Graph from rows of [node, neighbor, ...], storing each node's
    neighbors deduplicated and sorted."""
    G = Graph()
    for row in adjlist:
        node, neighbors = row[0], row[1:]
        G[node] = sorted(set(neighbors))
    return G
def from_adjlist_unchecked(adjlist):
    """Build a Graph from rows of [node, neighbor, ...] with no cleanup:
    neighbor lists are stored exactly as given."""
    G = Graph()
    for node, *neighbors in adjlist:
        G[node] = neighbors
    return G
def from_dict(d):
    """Build a Graph whose adjacency entries are copied from the dict `d`."""
    G = Graph()
    G.update(d)
    return G
| 6,465 | 22.512727 | 102 | py |
BioNEV | BioNEV-master/src/bionev/struc2vec/struc2vec.py | # -*- coding: utf-8 -*-
from bionev.struc2vec.algorithms import *
from bionev.struc2vec.algorithms_distances import *
class Graph:
    """struc2vec driver: orchestrates the (mostly multiprocessing, disk-backed)
    pipeline of BFS degree extraction, pairwise DTW distances, similarity
    network construction, and random-walk generation.

    Most stages communicate through pickles on disk rather than return values.
    """

    def __init__(self, g, workers, is_directed=False, untilLayer=None):
        """Args:
            g: a graph object exposing gToDict()/number_of_nodes()/number_of_edges().
            workers: number of worker processes for parallel stages.
            is_directed: stored but not consulted in this class.
            untilLayer: cap on how many layers of structural context to compute.
        """
        logging.info(" - Converting graph to dict...")
        self.G = g.gToDict()
        logging.info("Graph converted.")
        self.num_vertices = g.number_of_nodes()
        self.num_edges = g.number_of_edges()
        self.is_directed = is_directed
        self.workers = workers
        self.calcUntilLayer = untilLayer
        logging.info('Graph - Number of vertices: {}'.format(self.num_vertices))
        logging.info('Graph - Number of edges: {}'.format(self.num_edges))

    def preprocess_neighbors_with_bfs(self):
        """Compute per-vertex BFS degree sequences (saved as 'degreeList')."""
        with ProcessPoolExecutor(max_workers=self.workers) as executor:
            job = executor.submit(exec_bfs, self.G, self.workers, self.calcUntilLayer)
            job.result()
        return

    def preprocess_neighbors_with_bfs_compact(self):
        """Compact variant of the BFS preprocessing (saved as 'compactDegreeList')."""
        with ProcessPoolExecutor(max_workers=self.workers) as executor:
            job = executor.submit(exec_bfs_compact, self.G, self.workers, self.calcUntilLayer)
            job.result()
        return

    def preprocess_degree_lists(self):
        """Run the degree-list preprocessing step in a child process."""
        with ProcessPoolExecutor(max_workers=self.workers) as executor:
            job = executor.submit(preprocess_degreeLists)
            job.result()
        return

    def create_vectors(self):
        """Index vertices by degree and link each degree to its nearest
        smaller/larger degree; saved as 'degrees_vector'."""
        logging.info("Creating degree vectors...")
        degrees = {}
        degrees_sorted = set()
        G = self.G
        for v in list(G.keys()):
            degree = len(G[v])
            degrees_sorted.add(degree)
            if (degree not in degrees):
                degrees[degree] = {}
                degrees[degree]['vertices'] = deque()
            degrees[degree]['vertices'].append(v)
        degrees_sorted = np.array(list(degrees_sorted), dtype='int')
        degrees_sorted = np.sort(degrees_sorted)
        l = len(degrees_sorted)
        # 'before'/'after' let the sampler walk to adjacent degree buckets.
        for index, degree in enumerate(degrees_sorted):
            if (index > 0):
                degrees[degree]['before'] = degrees_sorted[index - 1]
            if (index < (l - 1)):
                degrees[degree]['after'] = degrees_sorted[index + 1]
        logging.info("Degree vectors created.")
        logging.info("Saving degree vectors...")
        saveVariableOnDisk(degrees, 'degrees_vector')

    def calc_distances_all_vertices(self, compactDegree=False):
        """Compute DTW distances between ALL vertex pairs (v1 < v2), in parallel.

        NOTE(review): the two log messages below appear swapped relative to the
        pickle actually loaded in each branch.
        """
        logging.info("Using compactDegree: {}".format(compactDegree))
        if (self.calcUntilLayer):
            logging.info("Calculations until layer: {}".format(self.calcUntilLayer))
        futures = {}
        count_calc = 0
        vertices = list(reversed(sorted(list(self.G.keys()))))
        if (compactDegree):
            logging.info("Recovering degreeList from disk...")
            degreeList = restoreVariableFromDisk('compactDegreeList')
        else:
            logging.info("Recovering compactDegreeList from disk...")
            degreeList = restoreVariableFromDisk('degreeList')
        parts = self.workers
        chunks = partition(vertices, parts)
        t0 = time()
        with ProcessPoolExecutor(max_workers=self.workers) as executor:
            part = 1
            for c in chunks:
                logging.info("Executing part {}...".format(part))
                # For each v, compare only against vertices with a larger id,
                # so every unordered pair is computed exactly once.
                list_v = []
                for v in c:
                    list_v.append([vd for vd in degreeList.keys() if vd > v])
                job = executor.submit(calc_distances_all, c, list_v, degreeList, part, compactDegree=compactDegree)
                futures[job] = part
                part += 1
            logging.info("Receiving results...")
            for job in as_completed(futures):
                job.result()
                r = futures[job]
                logging.info("Part {} Completed.".format(r))
        logging.info('Distances calculated.')
        t1 = time()
        logging.info('Time : {}m'.format((t1 - t0) / 60))
        return

    def calc_distances(self, compactDegree=False):
        """Compute DTW distances only between similar-degree vertex pairs.

        First splits the degree list per part (serially), then computes each
        part's distances in parallel. `calc_distances` inside submit() resolves
        to the module-level function imported from algorithms_distances, not
        to this method.
        """
        logging.info("Using compactDegree: {}".format(compactDegree))
        if (self.calcUntilLayer):
            logging.info("Calculations until layer: {}".format(self.calcUntilLayer))
        futures = {}
        # distances = {}
        count_calc = 0
        G = self.G
        vertices = list(G.keys())
        parts = self.workers
        chunks = partition(vertices, parts)
        with ProcessPoolExecutor(max_workers=1) as executor:
            logging.info("Split degree List...")
            part = 1
            for c in chunks:
                job = executor.submit(splitDegreeList, part, c, G, compactDegree)
                job.result()
                logging.info("degreeList {} completed.".format(part))
                part += 1
        with ProcessPoolExecutor(max_workers=self.workers) as executor:
            part = 1
            for c in chunks:
                logging.info("Executing part {}...".format(part))
                job = executor.submit(calc_distances, part, compactDegree=compactDegree)
                futures[job] = part
                part += 1
            logging.info("Receiving results...")
            for job in as_completed(futures):
                job.result()
                r = futures[job]
                logging.info("Part {} completed.".format(r))
        return

    def consolide_distances(self):
        """Merge the per-part distance pickles into a single 'distances' pickle.

        NOTE(review): consolidation is invoked on `distances` BEFORE `d` is
        merged in on each iteration (and once more at the end) — the repeated
        in-loop call looks redundant but is preserved as-is.
        """
        distances = {}
        parts = self.workers
        for part in range(1, parts + 1):
            d = restoreVariableFromDisk('distances-' + str(part))
            preprocess_consolides_distances(distances)
            distances.update(d)
        preprocess_consolides_distances(distances)
        saveVariableOnDisk(distances, 'distances')

    def create_distances_network(self):
        """Run the full distance-network pipeline in a child process."""
        with ProcessPoolExecutor(max_workers=1) as executor:
            job = executor.submit(generate_distances_network, self.workers)
            job.result()
        return

    def preprocess_parameters_random_walk(self):
        """Precompute the random-walk sampling parameters in a child process."""
        with ProcessPoolExecutor(max_workers=1) as executor:
            job = executor.submit(generate_parameters_random_walk, self.workers)
            job.result()
        return

    def simulate_walks(self, num_walks, walk_length):
        """Generate random walks over the layered similarity network.

        Args:
            num_walks: number of walks per vertex.
            walk_length: number of steps per walk.
        """
        # for large graphs, it is serially executed, because of memory use.
        if (len(self.G) > 500000):
            with ProcessPoolExecutor(max_workers=1) as executor:
                job = executor.submit(generate_random_walks_large_graphs, num_walks, walk_length, self.workers,
                                      list(self.G.keys()))
                job.result()
        else:
            with ProcessPoolExecutor(max_workers=1) as executor:
                job = executor.submit(generate_random_walks, num_walks, walk_length, self.workers, list(self.G.keys()))
                job.result()
        return
| 6,956 | 31.059908 | 119 | py |
BioNEV | BioNEV-master/src/bionev/struc2vec/utils.py | # -*- coding: utf-8 -*-
import inspect
import logging
import os.path
import pickle as pickle
from itertools import islice
from time import time
# Absolute directory of this module, resolved through the inspect stack so it
# is independent of the current working directory.
dir_f = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# All intermediate pickles live under <module dir>/pickles/.
folder_pickles = dir_f + "/pickles/"
def returnPathStruc2vec():
    """Return the absolute directory of the struc2vec package (pickle root)."""
    return dir_f
def isPickle(fname):
    """Return True if a pickle named `fname` exists in the package's pickles folder."""
    pickle_path = dir_f + '/pickles/' + fname + '.pickle'
    return os.path.isfile(pickle_path)
def chunks(data, SIZE=10000):
    """Yield successive sub-dicts of `data`, each holding at most SIZE items."""
    key_stream = iter(data)
    for _ in range(0, len(data), SIZE):
        yield {key: data[key] for key in islice(key_stream, SIZE)}
def partition(lst, n):
division = len(lst) / float(n)
return [lst[int(round(division * i)): int(round(division * (i + 1)))] for i in range(n)]
def restoreVariableFromDisk(name):
    """Unpickle and return the object stored as <pickles dir>/<name>.pickle."""
    logging.info('Recovering variable...')
    t0 = time()
    val = None
    with open(folder_pickles + name + '.pickle', 'rb') as handle:
        val = pickle.load(handle)
    t1 = time()
    logging.info('Variable recovered. Time: {}m'.format((t1 - t0) / 60))
    return val
def saveVariableOnDisk(f, name):
    """Pickle the object `f` to <pickles dir>/<name>.pickle (highest protocol)."""
    logging.info('Saving variable on disk...')
    t0 = time()
    with open(folder_pickles + name + '.pickle', 'wb') as handle:
        pickle.dump(f, handle, protocol=pickle.HIGHEST_PROTOCOL)
    t1 = time()
    logging.info('Variable saved. Time: {}m'.format((t1 - t0) / 60))
    return
| 1,341 | 23.851852 | 92 | py |
null | LERG-main/README.md | # LERG
LERG (Local Explanation of Response Generation) is a unified approach to explain why a conditional text generation model will predict a text.
For more details, please refer to the paper [Local Explanation of Dialogue Response Generation, Neurips 2021](https://arxiv.org/pdf/2106.06528.pdf).
## Install
LERG can be installed from PyPI
```
pip install lerg
```
## Usage
### Reproduce our results
* Download our used data and model from [Drive](https://drive.google.com/drive/folders/1dXpdH6mPfk9uO6c2cskKJfMJ98bl0_32?usp=sharing)
* To explain dialogue generation models via methods `LERG_S`,`LERG_L`, and baselines `SHAP`, `LIME`, run
```
python explain.py --explain_method LERG_S --model_dir path-to-your-model-dir --data_path path-to-your-data-in-json
```
* After run `explain.py`, get the `<time-stamp>` of saved explanations that stored in format `"%m%d%Y_%H%M%S"`, run
```
python eval.py --explain_method LERG_S --time_stamp time-stamp-in-specified-format --model_dir path-to-your-model-dir --data_path path-to-your-data-in-json [--plot]
```
* To explain dialogue generation models via baselines `attn`(attention), `grad`(gradient), `none`(random), run
```
python eval.py --explain_method attn --time_stamp None --model_dir path-to-your-model-dir --data_path path-to-your-data-in-json [--plot]
```
### Use as package
You can find Jupyter notebook in `examples/`.
The overall idea is to first import a perturbation model and an explanation method; then you can use LERG as follows:
```
from lerg.perturbation_models import RandomPM
from lerg.RG_explainers import LERG_SHAP_log as LERG_S
PM = RandomPM()
perturb_f = PM.perturb_inputs
local_exp = LERG_S(<replace_with_your_model_forward_function>, input_str, output_str, perturb_f, your_tokenizer)
phi_set, phi_map, input_segments, output_segments = local_exp.get_local_exp()
```
## Results
Perplexity Changes after removal (PPLC\_R): (Slide with removal rate)
| Method | 0.1 | 0.2 | 0.3 | 0.4 | 0.5 |
| --------- | --- | --- | --- | --- | --- |
| Random | 1.0699 | 1.1528 | 1.2317 | 1.3395 | 1.4449 |
| Attention | 1.0307 | 1.0931 | 1.1803 | 1.3106 | 1.479 |
| Gradient | 1.0578 | 1.1898 | 1.3244 | 1.4751 | 1.6340 |
| LIME | 1.1342 | 1.3667 | 1.5278 | 1.6807 | 1.8180 |
| SHAP | 1.1348 | 1.4038 | 1.5890 | 1.7823 | 1.9441 |
| LERG\_L | 1.1442 | 1.3945 | 1.5554 | 1.7216 | 1.8646 |
| LERG\_S | 1.2006 | 1.5588 | 1.8038 | 2.0332 | 2.2148 |
Perplexity with additives (PPL\_A): (Slide with additive rate)
| Method | 0.5 | 0.6 | 0.7 | 0.8 | 0.9 |
| --------- | --- | --- | --- | --- | --- |
| Random | 13.1789 | 14.0605 | 15.3738 | 16.5017 | 17.8391 |
| Attention | 13.4672 | 14.1226 | 14.9678 | 15.7989 | 16.9803 |
| Gradient | 12.7876 | 13.3019 | 14.1291 | 15.0506 | 16.1721 |
| LIME | 12.3468 | 12.5363 | 13.0447 | 13.5929 | 14.3374 |
| SHAP | 12.1357 | 12.2112 | 12.5584 | 12.9923 | 13.6296 |
| LERG\_L | 12.0076 | 12.1196 | 12.5866 | 13.1717 | 13.8904 |
| LERG\_S | 11.5665 | 11.4350 | 11.5816 | 11.9123 | 12.4341 |
## Citation
If you find LERG is helpful to your research, we would appreciate a citation of this paper.
```
@article{tuan2021local,
title={Local Explanation of Dialogue Response Generation},
author={Tuan, Yi-Lin and Pryor, Connor and Chen, Wenhu and Getoor, Lise and Wang, William Yang},
journal={Advances in Neural Information Processing Systems},
volume={34},
year={2021}
}
```
| 3,445 | 41.02439 | 164 | md |
null | LERG-main/eval.py | from target_models import GPT
from lerg.metrics import ppl_c, ppl_c_add
from lerg.visualize import plot_interactions
import tqdm
import sys
import json
import torch
import numpy as np
import os
from datetime import datetime
from argparse import ArgumentParser
# Command-line interface: which explanation method to evaluate, where the saved
# explanations / model / data live, and whether to plot interaction maps.
parser = ArgumentParser()
parser.add_argument("--explain_method",type=str,required=True,
                    help="Choose from 'LERG_S', 'LERG_L', 'SHAP', 'LIME', 'attn', 'grad', 'none'(random)")
parser.add_argument("--time_stamp",type=str,required=True,
                    help="None for 'attn','grad','none'(random); for others, the time stamp in format '%m%d%Y_%H%M%S' of the saved explanations after runing 'explain.py'")
parser.add_argument("--model_dir",type=str,required=True,
                    help="Directory of the trained target model")
parser.add_argument("--data_path",type=str,required=True,
                    help="Path of the data for explaining the target model on")
parser.add_argument("--plot",action='store_true',
                    help="If true, plot the interactions (maps) for all data points")
args = parser.parse_args()
def read_data(data_path):
    """Load (last history utterance, ground-truth response) pairs from the
    "test" split of a JSON data file."""
    with open(data_path, "r") as fin:
        raw_data = json.load(fin)
    return [(entry["history"][-1], entry["gt_response"]) for entry in raw_data["test"]]
# Removal rates for PPLC_R and additive rates for PPL_A (see README tables).
pplc_r_ratios = [0.1,0.2,0.3,0.4,0.5]
ppl_a_ratios = [0.5,0.6,0.7,0.8,0.9]
def evaluate_exp(tokenizer, model_f, data_path):
    """Evaluate the chosen explanation method with PPLC_R / PPL_A over the data.

    For 'attn'/'grad' the explanation is recomputed by model_f; for 'none' a
    random baseline with no explanation is used; for the remaining methods the
    explanations previously saved by explain.py (keyed by example id and
    args.time_stamp) are loaded from 'exp/'. Only examples whose input and
    output are <= 30 tokens are scored, matching explain.py's filter.
    """
    data = read_data(data_path)
    avg_pplc = [0 for _ in pplc_r_ratios]
    avg_pplc_add = [0 for _ in ppl_a_ratios]

    def count_stats(phi_set, phi_map, x_components, y_components, model_f):
        # Accumulate per-ratio entropy changes into the enclosing lists.
        for i, r in enumerate(pplc_r_ratios):
            entc, x_re, _, _ = ppl_c(phi_set, x_components, y_components, model_f, ratio=r)
            avg_pplc[i] += entc
        for i, r in enumerate(ppl_a_ratios):
            ent_add, *_ = ppl_c_add(phi_set, x_components, y_components, model_f, ratio=r)
            avg_pplc_add[i] += ent_add

    example_id = 0
    count = 0
    if args.plot:
        if not os.path.exists("plots/{}/".format(args.explain_method)):
            os.mkdir("plots/{}".format(args.explain_method))
    if args.explain_method == "attn" or args.explain_method == "none" or args.explain_method == "grad":
        for x, y in tqdm.tqdm(data):
            if len(tokenizer.tokenize(x)) <= 30 and len(tokenizer.tokenize(y)) <= 30:
                if args.explain_method != "none":
                    phi_set, phi_map, x_components, y_components = model_f([x],y,output_type=args.explain_method)
                    if args.plot:
                        plot_interactions(phi_map,x_components,y_components,save_path='plots/{}/{}_{}.png'.format(args.explain_method, example_id, args.time_stamp))
                else:
                    # Random baseline: no explanation, metrics sample randomly.
                    phi_set, phi_map = None, None
                    x_components = tokenizer.tokenize(x)
                    y_components = tokenizer.tokenize(y)
                count_stats(phi_set, phi_map, x_components, y_components, model_f)
                count += 1
            example_id += 1
    else:
        for x, y in tqdm.tqdm(data):
            exp_path = 'exp/{}_{}_{}.exp'.format(args.explain_method, example_id, args.time_stamp)
            if os.path.exists(exp_path):
                phi_set, phi_map, x_components, y_components = torch.load(exp_path)
                if args.plot:
                    plot_interactions(phi_map,x_components,y_components,save_path='plots/{}/{}_{}.png'.format(args.explain_method, example_id, args.time_stamp))
                count_stats(phi_set, phi_map, x_components, y_components, model_f)
                count += 1
            example_id += 1
    print(count)
    # Report averaged exp(-entropy) = perplexity-style scores per ratio.
    print("PPLC_R:{}".format([np.exp(-pplc_r/count) for pplc_r in avg_pplc]))
    print("PPL_A:{}".format([np.exp(-pplc_a/count) for pplc_a in avg_pplc_add]))
if __name__ == "__main__":
    # Load the target GPT model and score the chosen explanation method.
    model = GPT(model_dir=args.model_dir)
    evaluate_exp(model.tokenizer, model.forward, args.data_path)
| 3,915 | 44.534884 | 164 | py |
null | LERG-main/explain.py | from lerg.perturbation_models import RandomPM, LIMERandomPM
from lerg.RG_explainers import LERG_LIME, LERG_R, LERG_SHAP, LERG_SHAP_log
from target_models import GPT
import torch
import tqdm
import sys
import json
import os
from datetime import datetime
from argparse import ArgumentParser
# Command-line interface: which explanation method to run and where the target
# model and data live.
parser = ArgumentParser()
parser.add_argument("--explain_method",type=str,required=True,
                    help="Choose from 'LERG_S', 'LERG_L', 'SHAP', 'LIME'")
parser.add_argument("--model_dir",type=str,required=True,
                    help="Directory of the trained target model")
parser.add_argument("--data_path",type=str,required=True,
                    help="Path of the data for explaining the target model on")
args = parser.parse_args()
def read_data(data_path):
    """Load (last history utterance, ground-truth response) pairs from the
    "test" split of a JSON data file."""
    with open(data_path, "r") as fin:
        raw_data = json.load(fin)
    return [(entry["history"][-1], entry["gt_response"]) for entry in raw_data["test"]]
def explain_dataset(explainer, model_f, tokenizer, data_path):
    """Explain every (input, response) pair in the data's test split and save
    each explanation under 'exp/<method>_<example_id>_<timestamp>.exp'.

    Args:
        explainer: either an explainer class, or a tuple
            (perturbation_model, explainer_class); in the tuple case the
            perturbation model supplies the `perturb_inputs` function.
        model_f: the target model's forward function.
        tokenizer: the target model's tokenizer (used to filter long sentences).
        data_path: path to the JSON data file with a "test" split.

    Note: example_id advances for EVERY example (including skipped ones) so
    that eval.py, which counts the same way, finds the files by id.
    """
    if isinstance(explainer, tuple):
        PM, LERG = explainer
        perturb_f = PM.perturb_inputs
    else:
        LERG = explainer
        perturb_f = None
    data = read_data(data_path)
    # Cleanup: removed the unused `avg_pplc` accumulator from the original.
    now = datetime.now()
    nowstr = now.strftime("%m%d%Y_%H%M%S")
    if not os.path.exists("exp/"):
        os.mkdir("exp")
    example_id = 0
    for x, y in tqdm.tqdm(data):
        # experiment on sentences with length less than 30, such that can get explanation using 8G GPU
        if len(tokenizer.tokenize(x)) <= 30 and len(tokenizer.tokenize(y)) <= 30:
            local_exp = LERG(model_f, x, y, perturb_f, tokenizer)
            # Computes and caches the explanation on the explainer instance;
            # the returned values are persisted by save_exp below.
            local_exp.get_local_exp()
            save_path = 'exp/{}_{}_{}.exp'.format(args.explain_method, example_id, nowstr)
            local_exp.save_exp(save_path)
        example_id += 1
if __name__ == "__main__":
    # Map the CLI choice to a (perturbation model, explainer class) pair.
    PM = RandomPM()
    if args.explain_method == "LIME":
        PM = LIMERandomPM()
        explainer = (PM, LERG_LIME)
    elif args.explain_method == "LERG_L":
        PM = LIMERandomPM()
        # Bug fix: the original referenced `LERG_LIME_R`, which is never
        # imported (NameError at runtime); the imported LERG_R is the
        # LERG-via-LIME-regression explainer this branch intends.
        explainer = (PM, LERG_R)
    elif args.explain_method == "SHAP":
        explainer = (PM, LERG_SHAP)
    elif args.explain_method == "LERG_S":
        explainer = (PM, LERG_SHAP_log)
    else:
        raise ValueError("select an explainer from \{'LIME', 'SHAP', 'LERG_L', 'LERG_S'\}, currently is {}".format(args.explain_method))

    model = GPT(model_dir=args.model_dir)
    explain_dataset(explainer, model.forward, model.tokenizer, args.data_path)
| 2,528 | 36.746269 | 136 | py |
null | LERG-main/target_models.py | import torch
import torch.nn.functional as F
from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel
from transformers import GPT2Tokenizer, GPT2LMHeadModel
def get_sum_multi_head_attentions(multi_head_attentions):
    """Collapse per-layer multi-head attentions into a single tensor:
    sum over the head dimension (dim 1) of each layer, then sum across layers."""
    total = 0
    for layer_attention in multi_head_attentions:
        total = total + torch.sum(layer_attention, 1)
    return total
class GPT:
    """Wrapper around a fine-tuned OpenAI-GPT dialogue model that exposes
    probabilities, summed attentions, or gradient-based saliency for an
    (input, response) pair."""

    def __init__(self, model_dir="models/dailydialog_gpt", device="cuda" if torch.cuda.is_available() else "cpu", evaluate=False):
        """Load tokenizer and LM head model (with attentions) from `model_dir`.
        `evaluate` is accepted but unused here."""
        self.device = device
        self.tokenizer = OpenAIGPTTokenizer.from_pretrained(model_dir)
        self.model = OpenAIGPTLMHeadModel.from_pretrained(model_dir, output_attentions=True).to(device)
        self.SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
        self.bos, self.eos, self.speaker1, self.speaker2, self.padding = \
            self.tokenizer.convert_tokens_to_ids(self.SPECIAL_TOKENS)

    def forward(self, inputs, label=None, is_x_tokenized=False, is_y_tokenized=False, output_type="prob"):
        """Score response `label` given a batch of context strings `inputs`.

        Sequences are laid out as <bos> <speaker1> x... (padded) <speaker2> y...

        Returns (depending on output_type):
            "prob": (per-position softmax over the vocab for the response
                span, response token ids shifted by one).
            "attn": (attention summed per input token, {(xi, yi): score} map,
                input tokens, output tokens) — heads and layers are summed.
            "grad": same structure but scores are embedding-norm changes after
                a single gradient step on each response position's loss.
        """
        x_set = [self.tokenizer.convert_tokens_to_ids(x) for x in inputs] if is_x_tokenized \
            else [self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(x)) for x in inputs]
        x_set = [[self.speaker1] + x for x in x_set]
        # Right-pad every context to the longest one so the batch is rectangular.
        max_l = max(len(x) for x in x_set)
        x_set = [x + [self.padding] * (max_l - len(x)) for x in x_set]
        y = self.tokenizer.convert_tokens_to_ids(label) if is_y_tokenized \
            else self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(label))
        y = [self.speaker2] + y
        input_ids = torch.tensor([[self.bos] + x+y for x in x_set]).to(self.device)
        token_type_ids = torch.tensor([[self.speaker1] * (len(x)+1) + [self.speaker2] * len(y) for x in x_set]).to(self.device)
        if output_type == "prob":
            with torch.no_grad():
                outputs = self.model(input_ids, token_type_ids=token_type_ids)
                probs = F.softmax(outputs.logits, dim=-1)
            # Positions -len(y)..-2 predict response tokens y[1:].
            return probs[:,-len(y):-1,:], y[1:]
        elif output_type == "attn":
            with torch.no_grad():
                outputs = self.model(input_ids, token_type_ids=token_type_ids)
                probs = F.softmax(outputs.logits, dim=-1)
                # Sum attention over heads and layers, then slice the
                # response-rows x context-columns sub-matrix (special tokens
                # at the boundaries are excluded by the offsets).
                attn = get_sum_multi_head_attentions(outputs[-1])
                attn = attn[0][max_l+1:, 1:max_l]
            attn_map = {}
            for xi in range(max_l-1):
                for yi in range(len(y)-1):
                    attn_map[(xi,yi)] = attn[yi,xi]
            attn_set = torch.sum(attn,dim=0)
            return attn_set, attn_map, self.tokenizer.tokenize(inputs[0]), self.tokenizer.tokenize(label)
        elif output_type == "grad":
            # Freeze everything except the first parameter tensor, assumed to
            # be the token embedding matrix of this GPT implementation.
            count = 0
            for param in self.model.parameters():
                if count > 0:
                    param.requires_grad = False
                else:
                    embeds = param
                count +=1
            # Re-append <eos> for the LM-loss formulation used here.
            input_ids = torch.tensor([[self.bos] + x+y + [self.eos] for x in x_set]).to(self.device)
            token_type_ids = torch.tensor([[self.speaker1] * (len(x)+1) + [self.speaker2] * (len(y)+1) for x in x_set]).to(self.device)
            outputs = self.model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
            # Per-position cross entropy over the response span only.
            losses = F.cross_entropy(outputs.logits[0,max_l+2:-1,:], input_ids[0,max_l+3:], reduction="none")
            scores = []
            for j in range(len(y)-1):
                # Saliency = change in embedding norm after one gradient step
                # on position j's loss, read out at the context token ids.
                grads = torch.autograd.grad(losses[j],embeds,retain_graph=True, create_graph=False)[0]
                mod = embeds - grads
                changes = torch.norm(mod, dim=1) - torch.norm(embeds, dim=1)
                scores.append(changes[input_ids[0,2:max_l+1]])
            grad_map = {}
            for xi in range(max_l-1):
                for yi in range(len(y)-1):
                    grad_map[(xi,yi)] = scores[yi][xi]
            grad_set = torch.sum(torch.stack(scores),dim=0)
            return grad_set, grad_map, self.tokenizer.tokenize(inputs[0]), self.tokenizer.tokenize(label)
| 4,007 | 55.450704 | 135 | py |
null | LERG-main/lerg/RG_explainers.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.linear_model import Ridge
import numpy as np
import random
class Explainer:
    """Base class for explanation methods.

    Args:
        model_f: the model under test, mapping an input x to an output y.
            Expected to return (per-token output probabilities, y's token ids),
            both of the same length.
        x: the input sequence.
        y: the generated (output) sequence.

    Subclasses populate phi values: one weight per input token x_i.
    """

    def __init__(self, model_f, x, y):
        self.phi_map = {}
        self.model_f = model_f
        self.x = x
        self.y = y

    def get_prob(self, probs, y):
        """For every sample, pick out the probability assigned to each gold token of y."""
        return [[dist[tok] for dist, tok in zip(sample, y)] for sample in probs]
class LERG(Explainer):
    """Base class for all Local Explanation methods for Response Generation."""

    def __init__(self, model_f, x, y, perturb_f, tokenizer, max_iters=50, device="cuda" if torch.cuda.is_available() else "cpu"):
        super().__init__(model_f, x, y)
        self.perturb_inputs = perturb_f
        self.max_iters = max_iters
        self.tokenizer = tokenizer
        self.device = device

    def combine_sequence(self, phi_sets):
        """Collapse per-output-token weights to per-input-token weights.

        phi_sets has shape (output_dim) x (input_dim); the sum runs over the
        output dimension.
        """
        return torch.sum(phi_sets, dim=0)

    def map_to_interactions(self, phi_sets):
        """Build the {(input index, output index): weight} interaction map."""
        return {
            (xi, yi): phi_sets[yi, xi]
            for yi in range(phi_sets.shape[0])
            for xi in range(phi_sets.shape[1])
        }

    def save_exp(self, save_path):
        """Persist the explanation produced by get_local_exp() to save_path."""
        if self.phi_set is None:
            raise ValueError("run get_local_exp() first")
        torch.save([self.phi_set, self.phi_map, self.components, self.y], save_path)
class LERG_LIME(LERG):
    """LERG via LIME: fits a proximity-weighted linear model on perturbations."""

    def __init__(self, model_f, x, y, perturb_f, tokenizer, max_iters=50, device="cuda" if torch.cuda.is_available() else "cpu"):
        super().__init__(model_f, x, y, perturb_f, tokenizer, max_iters, device=device)
        self.batchsize = 64

    def get_local_exp(self):
        """Fit the LIME explainer; returns (phi_set, phi_map, components, y).

        phi_set sums the per-output-token weights over the outputs; phi_map
        holds the full (input token, output token) -> weight interactions.
        """
        self.x = self.tokenizer.tokenize(self.x)
        self.y = self.tokenizer.tokenize(self.y)
        x_set, z_set, self.components = self.perturb_inputs(self.x)
        y_probs = []
        # BUGFIX: the batch count must be ceil(len(x_set) / batchsize). The
        # previous conditional expression parsed as
        # `(len // bs + 1) if len % bs > 0 else 0`, so a perturbation set whose
        # size was an exact multiple of the batch size produced ZERO batches
        # and torch.cat below failed on an empty list.
        num_batches = (len(x_set) + self.batchsize - 1) // self.batchsize
        for i in range(num_batches):
            probs, y = self.model_f(x_set[i*self.batchsize:(i+1)*self.batchsize], label=self.y, is_x_tokenized=True, is_y_tokenized=True)
            y_probs_batch = self.get_prob(probs, y)
            y_probs_batch = torch.tensor(y_probs_batch)
            y_probs.append(y_probs_batch)
        y_probs = torch.cat(y_probs, dim=0)
        # proximity kernel: samples closer (cosine) to the original get larger weight
        D = pairwise_distances(z_set, z_set[0].view(1, -1), metric='cosine')
        kernel_width = 25  # as LIME's original implementation
        weights = torch.tensor(np.sqrt(np.exp(-(D ** 2) / kernel_width ** 2)), requires_grad=False).to(self.device)
        self.expl_model = nn.Linear(z_set.shape[1], len(y), bias=False).to(self.device)
        self.optimizer = torch.optim.SGD(self.expl_model.parameters(), lr=5e-1)
        for i in range(self.max_iters):
            for z_batch, y_probs_batch, w_batch in zip(torch.split(z_set, self.batchsize), torch.split(y_probs, self.batchsize), torch.split(weights, self.batchsize)):
                z_batch = z_batch.to(self.device)
                y_probs_batch = y_probs_batch.to(self.device)
                preds = self.expl_model(z_batch)  # the original version for classifier
                loss = torch.mean(w_batch * (preds - y_probs_batch) ** 2)
                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()
        with torch.no_grad():
            # the linear model's weights are the explanation
            phi_sets = self.expl_model.weight
            self.phi_set = self.combine_sequence(phi_sets)
            self.phi_map = self.map_to_interactions(phi_sets)
        return self.phi_set, self.phi_map, self.components, self.y
class LERG_R(LERG):
    """LERG variant that regresses on probability ratios.

    Perturbed-sample probabilities are divided by the probability of y given
    the unperturbed input before fitting the linear explainer.
    """

    def __init__(self, model_f, x, y, perturb_f, tokenizer, max_iters=50, device="cuda" if torch.cuda.is_available() else "cpu"):
        super().__init__(model_f, x, y, perturb_f, tokenizer, max_iters, device=device)
        self.batchsize = 64

    def get_local_exp(self):
        """Fit the ratio-based linear explainer; returns (phi_set, phi_map, components, y)."""
        self.x = self.tokenizer.tokenize(self.x)
        self.y = self.tokenizer.tokenize(self.y)
        x_set, z_set, self.components = self.perturb_inputs(self.x)
        # probability of each y token given the *unperturbed* input
        ref_probs, y = self.model_f([self.x], label=self.y, is_x_tokenized=True, is_y_tokenized=True)
        ref_probs = torch.tensor(self.get_prob(ref_probs, y))[0]
        # probability of each y token for every perturbed input, as a ratio
        probs, y = self.model_f(x_set, label=self.y, is_x_tokenized=True, is_y_tokenized=True)
        ratio_probs = torch.tensor(self.get_prob(probs, y))
        ratio_probs /= ref_probs
        self.expl_model = nn.Linear(z_set.shape[1], len(y), bias=False).to(self.device)
        self.optimizer = torch.optim.SGD(self.expl_model.parameters(), lr=5e-1)
        for _ in range(self.max_iters):
            for z_batch, target_batch in zip(torch.split(z_set, self.batchsize), torch.split(ratio_probs, self.batchsize)):
                z_batch = z_batch.to(self.device)
                target_batch = target_batch.to(self.device)
                loss = F.mse_loss(self.expl_model(z_batch), target_batch)
                loss.backward()
                self.optimizer.step()
                self.optimizer.zero_grad()
        with torch.no_grad():
            phi_sets = self.expl_model.weight
            self.phi_set = self.combine_sequence(phi_sets)
            self.phi_map = self.map_to_interactions(phi_sets)
        return self.phi_set, self.phi_map, self.components, self.y
class LERG_SHAP(LERG):
    """LERG via SampleShapley (original formulation).

    For each input token i, phi is estimated as the weighted mean difference
    between paired samples with and without token i present.
    """

    def __init__(self, model_f, x, y, perturb_f, tokenizer, device="cuda" if torch.cuda.is_available() else "cpu"):
        super().__init__(model_f, x, y, perturb_f, tokenizer, max_iters=0, device=device)

    def get_local_exp(self):
        """Estimate Shapley values; returns (phi_set, phi_map, components, y)."""
        self.x = self.tokenizer.tokenize(self.x)
        self.y = self.tokenizer.tokenize(self.y)
        per_token_phis = []
        for i in range(len(self.x)):
            # 500 // len(x) paired samples per token, i.e. ~1000 model
            # evaluations in total -- the same budget as LERG_LIME
            x_set, x_set_with_i, weights, self.components = \
                self.perturb_inputs(self.x, num=500 // len(self.x), with_i=i)
            probs, y = self.model_f(x_set, label=self.y, is_x_tokenized=True, is_y_tokenized=True)
            probs_without = torch.tensor(self.get_prob(probs, y))
            probs, _ = self.model_f(x_set_with_i, label=self.y, is_x_tokenized=True, is_y_tokenized=True)
            probs_with = torch.tensor(self.get_prob(probs, y))
            weights = torch.tensor(weights).view(-1, 1)
            per_token_phis.append(torch.mean((probs_with - probs_without) * weights, dim=0))
        phi_sets = torch.stack(per_token_phis).transpose(0, 1)
        self.phi_set = self.combine_sequence(phi_sets)
        self.phi_map = self.map_to_interactions(phi_sets)
        return self.phi_set, self.phi_map, self.components, self.y
class LERG_SHAP_log(LERG):
    """LERG via Shapley values with a sample mean of log-probability differences."""

    def __init__(self, model_f, x, y, perturb_f, tokenizer, device="cuda" if torch.cuda.is_available() else "cpu"):
        super().__init__(model_f, x, y, perturb_f, tokenizer, max_iters=0, device=device)

    def get_local_exp(self):
        """Estimate log-space Shapley values; returns (phi_set, phi_map, components, y)."""
        self.x = self.tokenizer.tokenize(self.x)
        self.y = self.tokenizer.tokenize(self.y)
        per_token_phis = []
        for i in range(len(self.x)):
            # 500 // len(x) paired samples per token, i.e. ~1000 model
            # evaluations in total -- the same budget as LERG_LIME
            x_set, x_set_with_i, weights, self.components = \
                self.perturb_inputs(self.x, num=500 // len(self.x), with_i=i)
            probs, y = self.model_f(x_set, label=self.y, is_x_tokenized=True, is_y_tokenized=True)
            probs_without = torch.tensor(self.get_prob(probs, y))
            probs, _ = self.model_f(x_set_with_i, label=self.y, is_x_tokenized=True, is_y_tokenized=True)
            probs_with = torch.tensor(self.get_prob(probs, y))
            per_token_phis.append(torch.mean(torch.log(probs_with) - torch.log(probs_without), dim=0))
        phi_sets = torch.stack(per_token_phis).transpose(0, 1)
        self.phi_set = self.combine_sequence(phi_sets)
        self.phi_map = self.map_to_interactions(phi_sets)
        return self.phi_set, self.phi_map, self.components, self.y
| 8,905 | 39.666667 | 166 | py |
null | LERG-main/lerg/__init__.py | 0 | 0 | 0 | py | |
null | LERG-main/lerg/metrics.py | import torch
import numpy as np
import random
import pdb
from scipy.stats import skew
from collections import Counter
def get_expl(x, expl, ratio=0.2, remain_masks=False):
    """Select the top-`ratio` fraction of tokens of x as the explanation.

    If expl is None, tokens are instead kept independently at random with
    probability `ratio`.  With remain_masks=True the unselected tokens are
    replaced by "__" (and consecutive "__" collapsed into one) rather than
    dropped.
    """
    if expl is None:
        if remain_masks:
            x_entities = [tok if random.random() < ratio else "__" for tok in x]
        else:
            x_entities = [tok for tok in x if random.random() < ratio]
    else:
        k = int(len(x) * ratio // 1)
        if remain_masks:
            # keep at least one token when masking
            keep = torch.topk(expl, max(k, 1)).indices
            x_entities = [tok if ind in keep else "__" for ind, tok in enumerate(x)]
        else:
            keep = torch.topk(expl, k).indices
            x_entities = [tok for ind, tok in enumerate(x) if ind in keep]
    if remain_masks:
        # collapse runs of consecutive mask tokens
        merged = [x_entities[0]]
        for tok in x_entities[1:]:
            if tok == "__" and merged[-1] == "__":
                continue
            merged.append(tok)
        x_entities = merged
    return x_entities
def remove_expl(x, expl, ratio=0.2):
    """Drop the top-`ratio` explanation tokens from x.

    If no explanation is given (expl is None), tokens are removed
    independently at random with probability `ratio` instead.
    """
    if expl is None:
        return [tok for tok in x if random.random() >= ratio]
    k = int(len(x) * ratio // 1)
    drop = torch.topk(expl, k).indices
    return [tok for ind, tok in enumerate(x) if ind not in drop]
def get_ppl(probs, y, y_inds=None):
    """Return (perplexity, mean token log-likelihood) of y under probs[0].

    probs: batched per-token probability distributions; only the first sample
        probs[0] is scored.
    y: gold token ids aligned with probs[0].
    y_inds: optional subset of positions; if given, the mean is taken over
        those positions only.

    Uses the builtin sum: np.sum on a generator is deprecated and does not
    reliably reduce it (the previous code passed a generator expression).
    """
    if y_inds is None:
        ent = sum(np.log(p[yi]) for p, yi in zip(probs[0], y)) / len(y)
    else:
        ent = sum(np.log(p[yi]) for i, (p, yi) in enumerate(zip(probs[0], y)) if i in y_inds) / len(y_inds)
    return np.exp(-ent), ent
def ppl_c_add(expl, x, y, model_f, ratio=0.2):
    """Additive perplexity change: score y when only the explanation tokens of x are kept."""
    kept_x = get_expl(x, expl, ratio=ratio)
    probs, label_ids = model_f([kept_x], label=y, is_x_tokenized=True, is_y_tokenized=True)
    ppl_kept, ent_kept = get_ppl(probs.cpu(), label_ids)
    return ent_kept, ppl_kept, kept_x
def ppl_c(expl, x, y, model_f, ratio=0.2):
    """Perplexity change when the explanation tokens are removed from x.

    Returns (entropy change, reduced input, original perplexity, reduced perplexity).
    """
    reduced_x = remove_expl(x, expl, ratio=ratio)
    probs_full, label_ids = model_f([x], label=y, is_x_tokenized=True, is_y_tokenized=True)
    probs_reduced, _ = model_f([reduced_x], label=y, is_x_tokenized=True, is_y_tokenized=True)
    ppl_full, ent_full = get_ppl(probs_full.cpu(), label_ids)
    ppl_reduced, ent_reduced = get_ppl(probs_reduced.cpu(), label_ids)
    return ent_reduced - ent_full, reduced_x, ppl_full, ppl_reduced
| 2,391 | 33.666667 | 171 | py |
null | LERG-main/lerg/perturbation_models.py | import torch
import warnings
import math
import random
import numpy as np
import pdb
import scipy as sp
import sklearn
from transformers import BartTokenizer, BartForConditionalGeneration
import torch
def binomial_coef_dist(n):
    """Distribution over replacement counts 1..n//2, proportional to C(n, count).

    Returns (normalized densities, total number of such combinations).
    """
    counts = [math.comb(n, size + 1) for size in range(n // 2)]
    total = sum(counts)
    return [c / total for c in counts], total
class BasicPM():
    """Identity perturbation model: always returns the input unchanged."""

    def __init__(self):
        pass

    def perturb_inputs(self, x, num=1):
        """Return the single trivial perturbation of tokenized input x.

        Returns:
            x_set: [x] -- the input itself.
            z_set: an all-ones {0,1}^|x| feature tensor of shape (1, len(x)).
        Any num other than 1 is ignored (with a warning).
        """
        if num != 1:
            warnings.warn("BasicPM will always set argument num == 1")
        return [x], torch.ones(1, len(x))
class RandomPM(BasicPM):
    """Perturbation model replacing randomly chosen tokens with `sub_t`.

    With denoising=True the replacement token is '<mask>' and a BART model
    rewrites (denoises) the perturbed sentences into fluent text.
    """

    def __init__(self, sub_t="", denoising=False):
        super().__init__()
        self.sub_t = sub_t
        self.denoising = denoising
        if self.denoising:
            self.sub_t = '<mask>'
            self.bart_tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')
            self.bart_model = BartForConditionalGeneration.from_pretrained('facebook/bart-base').to('cuda')

    def _select_repl_num(self, dist_scale):
        """Sample a replacement count from the binomial distribution whose
        cumulative densities are given in dist_scale."""
        draw = random.random()
        for idx in range(len(dist_scale)):
            if draw < dist_scale[idx]:
                break
        return idx + 1

    def _denoise_x_set(self, x_set):
        """Run the perturbed sentences through BART to produce fluent variants."""
        encoded = self.bart_tokenizer(x_set, max_length=256, return_tensors='pt', padding=True).to('cuda')
        generated = self.bart_model.generate(
            encoded['input_ids'],
            top_k=10, top_p=0.9, temperature=0.9, max_length=256,
            early_stopping=True, num_return_sequences=1
        )
        return [
            self.bart_tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False)
            for g in generated
        ]

    def perturb_inputs(self, x, num=1000, with_i=None):
        """Draw `num` perturbations of token list x; at most half the tokens are replaced.

        If with_i is given, additionally build paired samples in which token
        with_i is kept (for Shapley-style differences), plus the corresponding
        sampling weights.
        """
        dist, num_comb = binomial_coef_dist(len(x))
        dist_scale = [sum(dist[:i + 1]) for i in range(len(dist))]
        num = min(num, num_comb * 4)
        # choose tokens to be replaced with sub_t
        if with_i is None:
            x_set, z_set = [], []
        else:
            x_set, x_set_with_i = [], []
            weights = []
        for _ in range(num):
            repl_num = self._select_repl_num(dist_scale)
            x_set.append(list(x))
            if with_i is None:
                z_set.append(np.ones((len(x),)))
                repl_list = random.sample(list(range(len(x))), repl_num)
                for t in repl_list:
                    x_set[-1][t] = self.sub_t
                    z_set[-1][t] = 0.
            else:
                x_set_with_i.append(list(x))
                indices_to_repl = list(range(len(x)))
                indices_to_repl.remove(with_i)
                repl_list = random.sample(indices_to_repl, repl_num)
                for t in repl_list:
                    x_set[-1][t] = self.sub_t
                    x_set_with_i[-1][t] = self.sub_t
                # the "without i" sample additionally masks token i itself
                x_set[-1][with_i] = self.sub_t
                weights.append(1 / (dist[repl_num - 1] * len(x)))
        if self.denoising:
            x_set = self._denoise_x_set([' '.join(toks) for toks in x_set])
        if with_i is None:
            return x_set, torch.tensor(z_set, dtype=torch.float32), x
        else:
            return x_set, x_set_with_i, weights, x
class LIMERandomPM(RandomPM):
    """LIME-style sampler: the first sample is the unperturbed input and the
    replacement counts are drawn uniformly from 1..len(x)//2."""

    def perturb_inputs(self, x, num=1000):
        """Draw `num` perturbations of x (at most half the tokens replaced)."""
        dist, num_comb = binomial_coef_dist(len(x))
        num = min(num, num_comb * 4)
        x_set, z_set = [], []
        repl_counts = np.random.randint(1, len(x) // 2 + 1, num - 1)
        # sample 0 is the original input with an all-ones feature vector
        x_set.append(list(x))
        z_set.append(np.ones((len(x),)))
        for repl_num in repl_counts:
            x_set.append(list(x))
            z_set.append(np.ones((len(x),)))
            for t in random.sample(list(range(len(x))), repl_num):
                x_set[-1][t] = self.sub_t
                z_set[-1][t] = 0.
        if self.denoising:
            x_set = self._denoise_x_set([' '.join(toks) for toks in x_set])
        return x_set, torch.tensor(z_set, dtype=torch.float32), x
| 4,757 | 33.230216 | 130 | py |
null | LERG-main/lerg/visualize.py | import numpy as np
import matplotlib.pyplot as plt
def plot_interactions(phi_map, x, y):
    """Heatmap of token-level interaction scores.

    phi_map maps (input index, output index) -> scalar tensor; input tokens x
    label the columns and output tokens y label the rows.  Returns (fig, ax).
    """
    values = np.around(
        [[phi_map[(i, j)].item() for i in range(len(x))] for j in range(len(y))],
        decimals=2,
    )
    fig = plt.figure()
    ax = plt.axes()
    ax.imshow(values, cmap=plt.get_cmap('Reds'))
    ax.set_xticks(np.arange(len(x)))
    ax.set_yticks(np.arange(len(y)))
    ax.set_xticklabels(x, fontsize=11)
    ax.set_yticklabels(y, fontsize=11)
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    # annotate each cell with its rounded value
    for row in range(len(y)):
        for col in range(len(x)):
            ax.text(col, row, values[row, col], ha="center", va="center", color="w")
    return fig, ax
| 733 | 33.952381 | 107 | py |
null | vnncomp2021_results-main/README.md | # vnncomp2021_results
results for vnncomp 2021. The csv files for all tools are in results_csv. The scores are computed using process_results.py, with stdout redirected to the output_*.txt files.
Summary scores are near the end of the file. You can check a specific benchmark by looking in the file. For example, to see network 4-2 property 2 of acasxu, I can look for the file for the following part:
```
Row: ['ACASXU_run2a_4_2_batch_2000-prop_2', '-', '6.4 (h)', '10.5 (h)', 'timeout', '41.1 (h)', 'timeout', 'timeout', '64.8 (h)', '62.5 (h)', 'timeout', 'timeout', 'timeout', '-']
73: nnv score: 0
73: nnenum score: 12
73: venus2 score: 11
73: NN-R score: 0
73: VeriNet score: 10
73: DNNF score: 0
73: Debona score: 0
73: a-b-CROWN score: 10
73: oval score: 10
73: Marabou score: 0
73: ERAN score: 0
73: NV.jl score: 0
73: randgen score: 0
```
The tools are listed in order, and the row gives the time and result for each tool. So nnenum's result should be holds, with a time of 6.4 seconds (after subtracting overhead). Sure enough, if you look in results_csv/nnenum.csv at the corresponding line you see:
`
acasxu,./benchmarks/acasxu/ACASXU_run2a_4_2_batch_2000.onnx,./benchmarks/acasxu/prop_2.vnnlib,.007249637,holds,7.374020909
`
The runtime was 7.374020909; after subtracting the overhead of 1.0 seconds you get 6.374, which rounds to 6.4.
The scores are also listed for each tool. Since nnenum was the fastest, it got 12 points (10 for a correct result + 2 for the time bonus as the fastest tool). Venus2, at 10.5 seconds, was the second fastest, so it got 11 points. None of the remaining tools were within two seconds, so they all got 10 points.
You can adjust some parameters in the scoring defined in main(). Specifically, you can change how incorrect results are judged by changing `resolve_conflicts` in the code:
```
# how to resolve conflicts (some tools output holds others output violated)
# "voting": majority rules, tie = ighore
# "odd_one_out": only if single tool has mismatch, assume it's wrong
# "ignore": ignore all conflicts
resolve_conflicts = "voting"
```
### Changes from VNNCOMP Presentation Results
Based on additional feedback from tool authors, we made the following changes after the VNNCOMP presentation. All changes to scoring can be adjusted through flags in the `main()` function.
1. ERAN included optimized overhead for benchamarks that didn't use the GPU. This optimization had the side effect of reducing their overhead measurement for all benchmarks, inflating their times on GPU benchmarks by a few seconds (and thus reducing their score).
You can change how overhead is measured by modifying the `single_overhead` flag. If True, then a uniform overhead is used for all measurements per tool (original). If False, ERAN will use a separate overhead measurement for the acasxu and eran benchmarks.
2. Neural Network Reach renamed to RPM. Also RPM's mnistfc results have been removed due to a VNNLIB parsing bug on that benchmark found after the competition. This can be disabled by commenting out the line: `skip_benchmarks['RPM'] = ['mnistfc']`
| 3,058 | 57.826923 | 283 | md |
null | vnncomp2021_results-main/process_results.py | """
Process vnncomp results
Stanley Bak
"""
from typing import Dict, List
import glob
import csv
from pathlib import Path
from collections import defaultdict
import numpy as np
class ToolResult:
    """Parsed results csv for a single tool.

    Each csv row is: category, network, property, prepare time, result,
    run time.  Loading normalizes the result strings to a small canonical set,
    tracks the tool's measurement overhead (smallest runtime among its solved
    instances), and drops categories in which the tool solved nothing.
    """

    # csv column indices
    CATEGORY = 0
    NETWORK = 1
    PROP = 2
    PREPARE_TIME = 3
    RESULT = 4
    RUN_TIME = 5

    # categories in which at least one tool solved an instance
    all_categories = set()

    # per-tool stats, keyed by tool name
    num_verified = defaultdict(int)  # number of benchmarks verified
    num_violated = defaultdict(int)
    num_holds = defaultdict(int)
    incorrect_results = defaultdict(int)
    num_categories = defaultdict(int)

    def __init__(self, tool_name, csv_path, cpu_benchmarks, skip_benchmarks):
        """Load results for tool_name from csv_path.

        cpu_benchmarks: categories measured with the tool's cpu-mode overhead.
        skip_benchmarks: categories whose results are forced to 'unknown'.
        """
        assert "csv" in csv_path

        self.tool_name = tool_name
        self.category_to_list = defaultdict(list)  # maps category -> list of result rows
        self.skip_benchmarks = skip_benchmarks
        self.cpu_benchmarks = cpu_benchmarks
        self.gpu_overhead = np.inf  # default overhead
        self.cpu_overhead = np.inf  # if using separate overhead for cpu
        self.max_prepare = 0.0

        self.load(csv_path)

    def result_instance_str(self, cat, index):
        """get a string representation of the instance for the given category and index"""
        row = self.category_to_list[cat][index]
        net = row[ToolResult.NETWORK]
        prop = row[ToolResult.PROP]
        return Path(net).stem + "-" + Path(prop).stem

    def single_result(self, cat, index):
        """get result_str, runtime of tool, after subtracting overhead"""
        row = self.category_to_list[cat][index]
        res = row[ToolResult.RESULT]
        t = float(row[ToolResult.RUN_TIME])
        # subtract the overhead appropriate to how this category was run
        t -= self.cpu_overhead if cat in self.cpu_benchmarks else self.gpu_overhead
        # all results less than 1.0 second are treated the same
        if t < 1.0:
            t = 1.0
        return res, t

    def load(self, csv_path):
        """load data from file"""
        unexpected_results = set()
        with open(csv_path, newline='') as csvfile:
            for row in csv.reader(csvfile):
                # normalize raw result strings to the canonical set
                row[ToolResult.RESULT] = row[ToolResult.RESULT].lower()
                substitutions = [('unsat', 'holds'),
                                 ('sat', 'violated'),
                                 ('no_result_in_file', 'unknown'),
                                 ('prepare_instance_error_', 'unknown'),
                                 ('run_instance_timeout', 'timeout'),
                                 ('error_exit_code_', 'error'),
                                 ]
                for from_prefix, to_str in substitutions:
                    if row[ToolResult.RESULT] == '':  # don't use '' as prefix
                        row[ToolResult.RESULT] = 'unknown'
                    elif row[ToolResult.RESULT].startswith(from_prefix):
                        row[ToolResult.RESULT] = to_str
                network = row[ToolResult.NETWORK]
                result = row[ToolResult.RESULT]
                cat = row[ToolResult.CATEGORY]
                prepare_time = float(row[ToolResult.PREPARE_TIME])
                run_time = float(row[ToolResult.RUN_TIME])
                if cat in self.skip_benchmarks:
                    result = row[ToolResult.RESULT] = "unknown"
                # tiny sanity-check instances are excluded from scoring
                if not ("test_nano" in network or "test_tiny" in network):
                    self.category_to_list[cat].append(row)
                if result not in ["holds", "violated", "timeout", "error", "unknown"]:
                    unexpected_results.add(result)
                if result in ["holds", "violated"]:
                    # overhead estimate: fastest successful run of this tool
                    if cat in self.cpu_benchmarks:
                        self.cpu_overhead = min(self.cpu_overhead, run_time)
                    else:
                        self.gpu_overhead = min(self.gpu_overhead, run_time)
                    self.max_prepare = max(self.max_prepare, prepare_time)
        assert not unexpected_results, f"Unexpected results: {unexpected_results}"
        print(f"Loaded {self.tool_name}, default-overhead (gpu): {round(self.gpu_overhead, 1)}s," + \
              f"cpu-overhead: {round(self.cpu_overhead, 1)}s, " + \
              f"prepare time: {round(self.max_prepare, 1)}s")
        self.delete_empty_categories()

    def delete_empty_categories(self):
        """delete categories without successful measurements (plus the 'test' category)"""
        to_remove = ["test"]
        for key in self.category_to_list.keys():
            rows = self.category_to_list[key]
            should_remove = True
            for row in rows:
                result = row[ToolResult.RESULT]
                if result in ('holds', 'violated'):
                    should_remove = False
                    break
            if should_remove:
                to_remove.append(key)
            elif key != "test":
                ToolResult.all_categories.add(key)
        for key in to_remove:
            # BUGFIX: guard the deletion -- 'test' is always in to_remove, but
            # a csv with no (non-nano/tiny) 'test' rows never creates the key,
            # and the unconditional `del` raised KeyError in that case
            if key in self.category_to_list:
                print(f"deleting {key} in tool {self.tool_name}")
                del self.category_to_list[key]
        ToolResult.num_categories[self.tool_name] = len(self.category_to_list)
def compare_results(result_list, resolve_conflicts, single_overhead):
    """Compare results across tools; print per-instance scoring, per-category
    score tables, and the overall score table.

    result_list: loaded ToolResult objects.
    resolve_conflicts: conflict policy forwarded to get_score()
        ("voting" / "odd_one_out" / "ignore").
    single_overhead: echoed into the printed table captions only.
    """
    min_percent = 0  # minimum percent for total score

    total_score = defaultdict(int)
    all_cats = {}

    for cat in ToolResult.all_categories:
        print(f"\nCategory {cat}:")
        # maps tool_name -> [score, num_verified, num_falsified, num_fastest]
        # (fixed annotation: typing.List takes a single type parameter)
        cat_score: Dict[str, List[int]] = {}
        all_cats[cat] = cat_score

        num_rows = 0
        participating_tools = []

        for tool_result in result_list:
            cat_dict = tool_result.category_to_list
            if not cat in cat_dict:
                continue
            rows = cat_dict[cat]
            assert num_rows == 0 or len(rows) == num_rows, f"tool {tool_result.tool_name}, cat {cat}, " + \
                f"got {len(rows)} rows expected {num_rows}"
            if num_rows == 0:
                num_rows = len(rows)
                print(f"Category {cat} has {num_rows} (from {tool_result.tool_name})")
            participating_tools.append(tool_result)

        # work with participating tools only
        tool_names = [t.tool_name for t in participating_tools]
        print(f"{len(participating_tools)} participating tools: {tool_names}")

        table_rows = []

        for index in range(num_rows):
            rand_gen_succeeded = False
            times_holds = []
            times_violated = []
            table_row = []
            table_rows.append(table_row)

            instance_str = participating_tools[0].result_instance_str(cat, index)
            table_row.append(instance_str)

            # first pass: collect each tool's (result, time) for this instance
            for t in participating_tools:
                res, secs = t.single_result(cat, index)
                if res == "unknown":
                    table_row.append("-")
                    continue
                if not res in ["holds", "violated"]:
                    table_row.append(res)
                    continue
                if res == "holds":
                    times_holds.append(secs)
                else:
                    assert res == "violated"
                    times_violated.append(secs)
                table_row.append(f"{round(secs, 1)} ({res[0]})")
                # randgen finding a violation marks the instance as 'easy'
                if t.tool_name == "randgen":
                    assert res == "violated"
                    rand_gen_succeeded = True
            print()

            if times_holds and times_violated:
                print(f"WARNING: multiple results for index {index}. Violated: {len(times_violated)}, " + \
                      f"Holds: {len(times_holds)}")
                table_row.append('*multiple results*')
            print(f"Row: {table_row}")

            # second pass: score each tool against the collected results
            for t in participating_tools:
                res, secs = t.single_result(cat, index)
                score, is_verified, is_falsified, is_fastest = get_score(t.tool_name, res, secs, rand_gen_succeeded,
                                                                         times_holds, times_violated,
                                                                         resolve_conflicts=resolve_conflicts)
                print(f"{index}: {t.tool_name} score: {score}, is_ver: {is_verified}, is_fals: {is_falsified}, " + \
                      f"is_fastest: {is_fastest}")
                if t.tool_name in cat_score:
                    tool_score_tup = cat_score[t.tool_name]
                else:
                    tool_score_tup = [0, 0, 0, 0]
                    cat_score[t.tool_name] = tool_score_tup
                # [score, num_verified, num_falsified, num_fastest]
                tool_score_tup[0] += score
                tool_score_tup[1] += 1 if is_verified else 0
                tool_score_tup[2] += 1 if is_falsified else 0
                tool_score_tup[3] += 1 if is_fastest else 0

        print("--------------------")
        print(", ".join(tool_names))
        for table_row in table_rows:
            print(", ".join(table_row))

        print(f"---------\nCategory {cat}:")
        max_score = max([t[0] for t in cat_score.values()])
        for tool, score_tup in cat_score.items():
            score = score_tup[0]
            percent = max(min_percent, 100 * score / max_score)
            print(f"{tool}: {score} ({round(percent, 2)}%)")
            # cifar2020 is excluded from the overall total
            if cat != 'cifar2020':
                total_score[tool] += percent

    print("\n###############")
    print("### Summary ###")
    print("###############")

    for cat in sorted(all_cats.keys()):
        cat_score = all_cats[cat]
        print(f"\n% Category {cat} (conflicts={resolve_conflicts}, single_overhead={single_overhead}):")
        res_list = []
        max_score = max([t[0] for t in cat_score.values()])
        cat_str = cat.replace('_', '-')
        # BUGFIX: the label argument was missing its f-prefix, so every
        # category table was emitted with the literal label "tab:cat_{cat}"
        print_table_header(f"Benchmark \\texttt{{{cat_str}}}", f"tab:cat_{cat}",
                           ("\\# ~", "Tool", "Verified", "Falsified", "Fastest", "Score", "Percent"),
                           align='lllllrr')
        for tool, score_tup in cat_score.items():
            score, num_verified, num_falsified, num_fastest = score_tup
            percent = max(min_percent, 100 * score / max_score)
            tool_latex = latex_tool_name(tool)
            desc = f"{tool_latex} & {num_verified} & {num_falsified} & {num_fastest} & {score} & {round(percent, 1)}\\% \\\\"
            res_list.append((percent, desc))
        for i, s in enumerate(reversed(sorted(res_list))):
            print(f"{i+1} & {s[1]}")
        print_table_footer()

    res_list = []
    print(f"\nTotal Score (conflicts={resolve_conflicts}, single_overhead={single_overhead}):")
    print_table_header("Overall Score", "tab:score", ["\\# ~", "Tool", "Score"])
    for tool, score in total_score.items():
        tool_latex = latex_tool_name(tool)
        desc = f"{tool_latex} & {round(score, 1)} \\\\"
        res_list.append((score, desc))
    for i, s in enumerate(reversed(sorted(res_list))):
        print(f"{i+1} & {s[1]}")
    print_table_footer()
def print_table_header(title, label, columns, align=None):
    """Print the opening LaTeX of a booktabs table.

    title/label go into \\caption and \\label; each entry of columns becomes a
    bold header cell.  align defaults to left ('l') for every column and must
    otherwise provide one alignment character per column.
    """
    if align is None:
        align = 'l' * len(columns)
    else:
        assert len(columns) == len(align)
    header_cells = ' & '.join('\\textbf{' + name + '}' for name in columns)

    print('\n\\begin{table}[h]')
    print('\\begin{center}')
    print('\\caption{' + title + '} \\label{' + label + '}')
    print('{\\setlength{\\tabcolsep}{2pt}')
    print('\\begin{tabular}[h]{@{}' + align + '@{}}')
    print('\\toprule')
    print(header_cells + "\\\\")
    print('\\midrule')
def print_table_footer():
    """Print the closing LaTeX of a table opened by print_table_header()."""
    closing = [
        "\\bottomrule",
        "\\end{tabular}",
        "}",
        "\\end{center}",
        "\\end{table}",
        "",
        "",
    ]
    print("\n".join(closing))
def get_score(tool_name, res, secs, rand_gen_succeded, times_holds, times_violated, resolve_conflicts):
    """Get the score for the given result

    Actually returns a 4-tuple: score, is_verified, is_falsified, is_fastest

    Correct hold: 10 points
    Correct violated (where random tests did not succeed): 10 points
    Correct violated (where random test succeeded): 1 point
    Incorrect result: -100 points

    Time bonus:
    The fastest tool for each solved instance will receive +2 points.
    The second fastest tool will receive +1 point.
    If two tools have runtimes within 0.2 seconds, we will consider them the same runtime.

    Note: times_holds / times_violated are the runtimes of ALL tools that
    answered holds / violated on this instance, including this tool's own.
    As a side effect, the per-tool counters on ToolResult are updated.
    """

    # how to resolve conflicts (some tools output holds others output violated)
    # "voting": majority rules, tie = ignore
    # "odd_one_out": only if single tool has mismatch, assume it's wrong
    # "ignore": ignore all conflicts
    assert resolve_conflicts in ["voting", "odd_one_out", "ignore"]

    is_verified = False
    is_falsified = False
    is_fastest = False

    num_holds = len(times_holds)
    num_violated = len(times_violated)

    if res not in ["holds", "violated"] or num_holds == num_violated:
        # no answer from this tool, or an exact tie among the other tools
        score = 0
    elif resolve_conflicts == "ignore" and num_holds > 0 and num_violated > 0:
        # any disagreement at all -> no points awarded under "ignore"
        score = 0
    elif resolve_conflicts == "odd_one_out" and num_holds > 1 and num_violated > 1:
        # more than one tool on each side -> cannot single out a wrong tool
        score = 0
    elif rand_gen_succeded:
        # random testing already found a counterexample, so 'violated' is easy
        assert res == "violated"
        score = 1

        ToolResult.num_verified[tool_name] += 1
        ToolResult.num_violated[tool_name] += 1
        is_falsified = True
    elif num_holds > num_violated and res == "violated":
        # majority says holds -> this 'violated' answer counts as incorrect
        score = -100
        ToolResult.incorrect_results[tool_name] += 1
    elif num_violated > num_holds and res == "holds":
        # majority says violated -> this 'holds' answer counts as incorrect
        score = -100
        ToolResult.incorrect_results[tool_name] += 1
    else:
        # correct result!
        ToolResult.num_verified[tool_name] += 1

        if res == "holds":
            is_verified = True
            times = times_holds.copy()
            ToolResult.num_holds[tool_name] += 1
        else:
            assert res == "violated"
            times = times_violated.copy()
            ToolResult.num_violated[tool_name] += 1
            is_falsified = True

        score = 10

        # time bonus: +2 for (near-)fastest, +1 for (near-)second fastest;
        # `times` includes this tool's own runtime, so min(times) <= secs
        min_time = min(times)

        if secs < min_time + 0.2:
            score += 2
            is_fastest = True
        else:
            times.remove(min_time)
            second_time = min(times)

            if secs < second_time + 0.2:
                score += 1

    return score, is_verified, is_falsified, is_fastest
def print_stats(result_list):
    """print stats about measurements: the per-tool overhead table plus
    summary-count tables built from the ToolResult class-level counters"""
    print('\n------- Stats ----------')

    print("\nOverhead:")
    olist = []

    for r in result_list:
        olist.append((r.gpu_overhead, r.cpu_overhead, r.tool_name))

    print_table_header("Overhead", "tab:overhead", ["\\# ~", "Tool", "Seconds", "~~CPU Mode"], align='llrr')
    for i, n in enumerate(sorted(olist)):
        # tools that never solved a cpu-mode instance have no cpu overhead measurement
        cpu_overhead = "-" if n[1] == np.inf else round(n[1], 1)
        print(f"{i+1} & {n[2]} & {round(n[0], 1)} & {cpu_overhead} \\\\")
    print_table_footer()

    # (table label, tool_name -> count mapping) pairs for the summary tables
    items = [("Num Benchmarks Participated", ToolResult.num_categories),
             ("Num Instances Verified", ToolResult.num_verified),
             ("Num Violated", ToolResult.num_violated),
             ("Num Holds", ToolResult.num_holds),
             ("Mismatched (Incorrect) Results", ToolResult.incorrect_results),
             ]

    for index, (label, d) in enumerate(items):
        print(f"\n% {label}:")
        tab_label = f"tab:stats{index}"
        print_table_header(label, tab_label, ["\\# ~", "Tool", "Count"], align='llr')

        l = []

        for tool, count in d.items():
            tool_latex = latex_tool_name(tool)
            l.append((count, tool_latex))

        # print rows in descending order of count
        for i, s in enumerate(reversed(sorted(l))):
            print(f"{i+1} & {s[1]} & {s[0]} \\\\")

        print_table_footer()
def latex_tool_name(tool):
    """Return the LaTeX rendering of a tool name (a-b-CROWN uses greek letters)."""
    return '$\\alpha$,$\\beta$-CROWN' if tool == 'a-b-CROWN' else tool
def main():
    """main entry point: load all tool csvs, score them, and print stats"""

    # use single overhead for all tools. False will have two different overheads for ERAN depending on CPU/GPU
    single_overhead = False
    print(f"using single_overhead={single_overhead}")

    # how to resolve conflicts (some tools output holds others output violated)
    # "voting": majority rules, tie = ignore
    # "odd_one_out": only if single tool has mismatch, assume it's wrong
    # "ignore": ignore all conflicts
    # resolve_conflicts = "odd_one_out"
    resolve_conflicts = "odd_one_out"
    print(f"using resolve_conflicts={resolve_conflicts}")

    #####################################
    csv_list = glob.glob("results_csv/*.csv")
    # tool name = csv filename without extension
    tool_list = [Path(c).stem for c in csv_list]

    result_list = []

    # per-tool lists of categories run in cpu mode / skipped entirely
    cpu_benchmarks = {x: [] for x in tool_list}
    skip_benchmarks = {x: [] for x in tool_list}
    # RPM's mnistfc results are dropped due to a VNNLIB parsing bug found after the competition
    skip_benchmarks['RPM'] = ['mnistfc']

    if not single_overhead: # Define a dict with the cpu_only benchmarks for each tool
        cpu_benchmarks["ERAN"] = ["acasxu", "eran"]

        # doesn't make much difference:
        #for t in tool_list:
        #    cpu_benchmarks[t] = ["acasxu", "eran"]

    for csv_path, tool_name in zip(csv_list, tool_list):
        tr = ToolResult(tool_name, csv_path, cpu_benchmarks[tool_name], skip_benchmarks[tool_name])
        result_list.append(tr)

    # compare results across tools
    compare_results(result_list, resolve_conflicts, single_overhead)

    print_stats(result_list)


if __name__ == "__main__":
    main()
| 18,053 | 32.557621 | 125 | py |
null | vnncomp2021_results-main/compare_cifar2020/README.md | comparison for cifar between 2020 and 2021.
The numbered files are created using tail to get the last 138 benchmarks from last year's results files. For example:
tail -n 138 ggn-all-verinet.txt > 6.txt
sum.py is then executed to print out the summary statistics in the table
| 278 | 33.875 | 116 | md |
null | vnncomp2021_results-main/compare_cifar2020/sum.py | 'stanley bak'
def main():
    """Entry point: print 2021 vs. 2020 summary counts for the 138 cifar instances."""
    # 2021 results: one row per instance, tagged "(v)" violated / "(h)" holds.
    with open('2021.csv') as f:
        rows = f.readlines()
    vio2021 = sum(1 for row in rows if '(v)' in row)
    holds2021 = sum(1 for row in rows if '(h)' in row and '(v)' not in row)
    unknown2021 = 138 - vio2021 - holds2021
    print(f"2021, violated: {vio2021}, holds: {holds2021}, unknown: {unknown2021}")

    # 2020 results: six per-tool files; an instance counts as solved if any
    # tool solved it. Line number doubles as the instance index (1..138).
    sat_indices = set()
    unsat_indices = set()
    unknown_indices = set(range(1, 139))
    for findex in range(1, 7):
        with open(f'{findex}.txt') as f:
            for line_num, raw in enumerate(f, start=1):
                if line_num > 138:
                    break
                text = raw.strip().lower()
                # "unsat" must be tested first since it also contains "sat"
                if "unsat" in text:
                    unsat_indices.add(line_num)
                    unknown_indices.discard(line_num)
                elif "sat" in text:
                    sat_indices.add(line_num)
                    unknown_indices.discard(line_num)
    print(f"2020, violated: {len(sat_indices)}, holds: {len(unsat_indices)}, unknown: {len(unknown_indices)}")
    assert len(unknown_indices) + len(sat_indices) + len(unsat_indices) == 138
| 1,367 | 26.36 | 110 | py |
null | emil-main/README.md | <img src="images/emil.png" width="800" />
# EMIL
Implementation of the EMIL architecture. Illustrated with MNIST and a simplified ResNet backbone.
## Usage
```python
import torch
from emil import EMIL
net = EMIL(
output_type = 'multiclass',
num_inp_channels = 1,
num_fmap_channels = 128,
att_dim = 128,
num_classes = 10,
patch_size = 1,
patch_stride = 1,
k_min = 100
)
img = torch.randn(1, 1, 32, 32)
pred, pred_local, pred_weight = net(img, output_heatmaps=True) # (1, 10), (1, k, 10), (1, k, 1)
```
`pred_local` holds local patch predictions, `pred_weight` holds attention weights.
## Parameters
`output_type`: string.<br />
Either `multiclass` or `binary`
`num_inp_channels`: int.<br />
Number of input channels
`num_fmap_channels`: int.<br />
Number of channels of the last conv layer
`att_dim`: int.<br />
Number of hidden dimensions in gated attention
`num_classes`: int.<br />
Number of classes
`patch_size`: int.<br />
Patch size in embedding space
`patch_stride`: int.<br />
Patch stride in embedding space
`k_min`: int.<br />
Minimum number of patches to achieve full class score. Default value is based on a maximum number of 16x16=256 patches.
For classical Multiple Instance Learning problems, such as the detection of dental caries, `k_min = 1` is recommended.
## Visualization
Both `pred_local` and `pred_weight` can be visualized as heatmaps. We show an example for MNIST and k_min=100.
<img src="images/mnist_pred_weight.png" width="350" />
| 1,491 | 23.866667 | 119 | md |
null | emil-main/emil.py | import torch
from torch import nn
from resnet import resnet18
class EMIL(nn.Module):
    """Embedding-based Multiple Instance Learning classifier.

    A truncated ResNet-18 backbone produces a feature map that is cut into
    patches by average pooling. A shared linear head scores every patch, a
    gated-attention branch weights every patch, and the image-level
    prediction is the attention-weighted mean of the patch scores with the
    normalizer clamped to at least ``k_min`` (so few high-weight patches
    cannot yield a full class score on their own).

    NOTE: attribute names below (backbone, patch_extractor,
    shared_output_layer, att_tanh, att_sigm, att_outer) are part of the
    saved checkpoint's state_dict keys -- do not rename.
    """
    def __init__(self, output_type, num_inp_channels, num_fmap_channels, att_dim, num_classes, patch_size, patch_stride, k_min):
        """
        Args:
            output_type: 'multiclass' (softmax patch head) or anything else
                (sigmoid patch head, intended for 'binary').
            num_inp_channels: input image channels.
            num_fmap_channels: channels of the backbone's output feature map.
            att_dim: hidden width of the gated-attention branch.
            num_classes: number of output classes.
            patch_size: patch size in embedding (feature-map) space.
            patch_stride: patch stride in embedding space.
            k_min: minimum effective patch count used as the denominator floor.
        """
        super().__init__()
        self.num_classes = num_classes
        self.k_min = k_min
        # Truncated ResNet-18 (see resnet.py): returns a feature map, not logits.
        self.backbone = resnet18(num_channels=num_inp_channels, num_classes=num_classes)
        # Average pooling turns the feature map into a grid of patch embeddings.
        self.patch_extractor = nn.AvgPool2d(patch_size, patch_stride)
        shared_output_layer = nn.Linear(num_fmap_channels, num_classes)
        # Activation of the shared per-patch head: probabilities, not logits.
        shared_output_layer_att = nn.Softmax(dim=-1) if output_type == 'multiclass' else nn.Sigmoid()
        self.shared_output_layer = nn.Sequential(
            shared_output_layer,
            shared_output_layer_att
        )
        # Gated attention (tanh branch * sigmoid branch -> scalar weight in (0, 1)).
        self.att_tanh = nn.Sequential(
            nn.Linear(num_fmap_channels, att_dim),
            nn.Tanh()
        )
        self.att_sigm = nn.Sequential(
            nn.Linear(num_fmap_channels, att_dim),
            nn.Sigmoid()
        )
        self.att_outer = nn.Sequential(
            nn.Linear(att_dim, 1),
            nn.Sigmoid()
        )
    def forward(self, img, output_heatmaps = False):
        """Classify `img`.

        Args:
            img: input batch; backbone reduces it to a (b, c, h, w) feature map.
            output_heatmaps: when True, also return per-patch predictions and
                attention weights for visualization.

        Returns:
            pred of shape (b, num_classes); with output_heatmaps also
            x_local (b, k, num_classes) and x_weight (b, k, 1), where k is
            the number of patches.
        """
        x = self.backbone(img)
        x = self.patch_extractor(x)
        b, c, _, _ = x.shape
        # Flatten the spatial grid into a patch axis: (b, c, h, w) -> (b, k, c).
        x = x.view(b, c, -1).permute(0, 2, 1)
        b, k, c = x.shape
        # Fold batch and patch axes together so the shared heads see (b*k, c).
        x = x.reshape(-1, c)
        x_local = self.shared_output_layer(x).view(b, k, self.num_classes)
        x_weight = self.att_outer(self.att_tanh(x) * self.att_sigm(x)).view(b, k, 1)
        # Weighted mean over patches; denominator floored at k_min.
        pred = torch.sum(x_local * x_weight, dim = 1) / torch.clamp(torch.sum(x_weight, dim = 1), min = self.k_min)
        if output_heatmaps:
            return pred, x_local, x_weight
        else:
            return pred
| 1,789 | 34.098039 | 128 | py |
null | emil-main/main.py | import os
import numpy as np
import torch
from torch import nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
from emil import EMIL
# Training script: trains EMIL on MNIST, evaluates after every epoch and
# saves the final model + optimizer state.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# --- hyperparameters --------------------------------------------------------
img_size = 32                 # MNIST 28x28 is resized to 32x32
b = 256                       # batch size
num_epochs = 20
fmap_dims = (16, 16)          # backbone feature-map size (unused here; kept for reference)
patch_size = 1                # patch size in embedding space
patch_stride = 1              # patch stride in embedding space
output_type = 'multiclass'
model_save_path = 'models/mnist_emil.pt'

net = EMIL(
    output_type = output_type,
    num_inp_channels = 1,
    num_fmap_channels = 128,
    att_dim = 128,
    num_classes = 10,
    patch_size = patch_size,
    patch_stride = patch_stride,
    k_min = 100
).to(device)

# data prep
img_transform = transforms.Compose([
    transforms.Resize(img_size),
    transforms.ToTensor()
])
train_data = datasets.MNIST(
    root = 'data',
    train = True,
    transform = img_transform,
    download = True,
)
test_data = datasets.MNIST(
    root = 'data',
    train = False,
    transform = img_transform
)
train_loader = DataLoader(
    train_data,
    batch_size=b,
    shuffle=True,
    num_workers=1
)
test_loader = DataLoader(
    test_data,
    batch_size=b,
    shuffle=False,
    num_workers=1
)

# NLLLoss expects log-probabilities: the net outputs probabilities, so we
# apply torch.log before the loss in the multiclass case.
loss_fn = nn.NLLLoss() if output_type == 'multiclass' else nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters())

# begin training
for epoch in range(num_epochs):
    net.train()
    for data in train_loader:
        images, labels = data[0].to(device), data[1].to(device)
        optimizer.zero_grad()
        pred = net(images, output_heatmaps=False)
        pred = torch.log(pred) if output_type == 'multiclass' else pred
        loss = loss_fn(pred, labels)
        loss.backward()
        optimizer.step()

    net.eval()
    correct, total = 0, 0
    # Bugfix: reset the loss accumulator every epoch. It was previously
    # created once before the loop, so the printed "Loss" was a running mean
    # over *all* epochs so far instead of the current epoch's test loss.
    loss_ls = []
    # evaluate
    with torch.no_grad():
        for data in test_loader:
            images, labels = data[0].to(device), data[1].to(device)
            pred = net(images, output_heatmaps=False)
            pred_class = torch.max(pred, 1)[1]
            correct += (pred_class == labels).sum()
            total += labels.shape[0]
            pred = torch.log(pred) if output_type == 'multiclass' else pred
            loss = loss_fn(pred, labels)
            loss_ls.append(loss.item())
    print("Epoch: {}, Loss: {}, Accuracy: {}".format(epoch+1, np.mean(loss_ls), correct / total))

# save model (weights + optimizer state; architecture is rebuilt from code at load time)
torch.save({
    'state_dict': net.state_dict(),
    'optimizer' : optimizer.state_dict(),
}, model_save_path)
null | emil-main/resnet.py | from typing import Type, Any, Callable, Union, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from torch.hub import load_state_dict_from_url
# from https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py, based on commit d367a01
# changes forward method to output feature map after applying 2 conv blocks
# Public API of this module: the ResNet class plus one factory per variant.
__all__ = [
    "ResNet",
    "resnet18",
    "resnet34",
    "resnet50",
    "resnet101",
    "resnet152",
    "resnext50_32x4d",
    "resnext101_32x8d",
    "wide_resnet50_2",
    "wide_resnet101_2",
]

# Official torchvision ImageNet checkpoint URLs, keyed by the architecture
# name passed to ``_resnet``.
# NOTE(review): this file's ResNet is truncated (only layer1/layer2, 3x3 stem),
# so a full pretrained state_dict will not match with strict loading --
# confirm before using pretrained=True.
model_urls = {
    "resnet18": "https://download.pytorch.org/models/resnet18-f37072fd.pth",
    "resnet34": "https://download.pytorch.org/models/resnet34-b627a593.pth",
    "resnet50": "https://download.pytorch.org/models/resnet50-0676ba61.pth",
    "resnet101": "https://download.pytorch.org/models/resnet101-63fe2227.pth",
    "resnet152": "https://download.pytorch.org/models/resnet152-394f9c45.pth",
    "resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
    "resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
    "wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
    "wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """Build a bias-free 3x3 convolution whose padding equals its dilation
    (spatial size is preserved at stride 1)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, dilation=dilation, groups=groups,
                     bias=False)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Build a bias-free pointwise (1x1) convolution, optionally strided."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block (the ResNet-18/34 building block).

    Computes ``relu(bn2(conv2(relu(bn1(conv1(x))))) + identity)``, where
    ``identity`` is ``x`` itself or ``downsample(x)`` when the shape changes.
    """

    # Output channels = planes * expansion; the basic block does not widen.
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # groups/base_width/dilation exist only for signature parity with
        # Bottleneck; BasicBlock supports none of them beyond the defaults.
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        """Apply the residual block: conv-bn-relu, conv-bn, add skip, relu."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Project the skip path so it matches `out` in channels/stride.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Three-conv bottleneck residual block (ResNet-50+ building block):
    1x1 reduce -> 3x3 -> 1x1 expand, with a skip connection."""

    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner 3x3 width; base_width/groups scale it for ResNeXt/Wide variants.
        width = int(planes * (base_width / 64.0)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        """Apply reduce -> 3x3 -> expand, add the (possibly projected) skip, relu."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Project the skip path so it matches `out` in channels/stride.
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Truncated ResNet used as a feature extractor.

    Differences from torchvision's ResNet (per the module-level note):
    a 3x3/stride-1 stem (no max-pool), only ``layer1`` and ``layer2``, and
    ``forward`` returns the feature map after ``layer2`` instead of class
    logits. ``avgpool`` and ``fc`` are still constructed but are NOT used by
    ``forward`` (leftovers from the upstream implementation).
    """

    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_channels = 3,
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        """
        Args:
            block: residual block class (BasicBlock or Bottleneck).
            layers: blocks per stage; only layers[0] and layers[1] are used here.
            num_channels: input image channels (e.g. 1 for MNIST).
            num_classes: output size of the (unused) fc head.
            zero_init_residual: zero-init the last BN of each residual branch.
            groups, width_per_group: ResNeXt/Wide-ResNet widening knobs.
            replace_stride_with_dilation: per-stage stride->dilation switches
                (3 entries for upstream compatibility; only index 0 is used).
            norm_layer: normalization layer factory, defaults to BatchNorm2d.
        """
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        # Channel count entering the next stage; mutated by _make_layer.
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                f"or a 3-element tuple, got {replace_stride_with_dilation}"
            )
        self.groups = groups
        self.base_width = width_per_group
        # 3x3 stride-1 stem (upstream uses 7x7 stride-2 + max-pool); keeps
        # resolution for small inputs like 32x32.
        self.conv1 = nn.Conv2d(num_channels, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        # Unused by forward(); sized for the full 4-stage ResNet (512 * expansion).
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

    def _make_layer(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        planes: int,
        blocks: int,
        stride: int = 1,
        dilate: bool = False,
    ) -> nn.Sequential:
        """Stack `blocks` residual blocks; the first block carries the stride
        and (if needed) a 1x1 projection on the skip path. Side effect:
        advances self.inplanes/self.dilation for the next stage."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade the stride for dilation (keeps spatial resolution).
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 projection so the skip connection matches the output shape.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer
            )
        )
        # Remaining blocks keep the new channel count and stride 1.
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                )
            )
        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        # Truncated forward: stem + layer1 + layer2 only; returns the feature
        # map (no pooling / fc head).
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        """Return the layer2 feature map for input `x`."""
        return self._forward_impl(x)
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any,
) -> ResNet:
    """Instantiate a ResNet variant and, when requested, load the ImageNet
    checkpoint registered for `arch` in model_urls."""
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict)
    return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-18 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr when True
    """
    layers = [2, 2, 2, 2]
    return _resnet("resnet18", BasicBlock, layers, pretrained, progress, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-34 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr when True
    """
    layers = [3, 4, 6, 3]
    return _resnet("resnet34", BasicBlock, layers, pretrained, progress, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-50 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr when True
    """
    layers = [3, 4, 6, 3]
    return _resnet("resnet50", Bottleneck, layers, pretrained, progress, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-101 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr when True
    """
    layers = [3, 4, 23, 3]
    return _resnet("resnet101", Bottleneck, layers, pretrained, progress, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-152 ("Deep Residual Learning for Image Recognition",
    https://arxiv.org/pdf/1512.03385.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr when True
    """
    layers = [3, 8, 36, 3]
    return _resnet("resnet152", Bottleneck, layers, pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNeXt-50 32x4d ("Aggregated Residual Transformation for Deep
    Neural Networks", https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr when True
    """
    # 32 groups of width 4 in every bottleneck's 3x3 convolution.
    kwargs.update(groups=32, width_per_group=4)
    return _resnet("resnext50_32x4d", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNeXt-101 32x8d ("Aggregated Residual Transformation for Deep
    Neural Networks", https://arxiv.org/pdf/1611.05431.pdf).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr when True
    """
    # 32 groups of width 8 in every bottleneck's 3x3 convolution.
    kwargs.update(groups=32, width_per_group=8)
    return _resnet("resnext101_32x8d", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a Wide ResNet-50-2 ("Wide Residual Networks",
    https://arxiv.org/pdf/1605.07146.pdf).

    Identical to ResNet-50 except that every bottleneck's inner 3x3
    convolution has twice the channels; the outer 1x1 convolutions are
    unchanged (e.g. the last block stays 2048-1024-2048 instead of
    2048-512-2048).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr when True
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet("wide_resnet50_2", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a Wide ResNet-101-2 ("Wide Residual Networks",
    https://arxiv.org/pdf/1605.07146.pdf).

    Identical to ResNet-101 except that every bottleneck's inner 3x3
    convolution has twice the channels; the outer 1x1 convolutions are
    unchanged (e.g. the last block stays 2048-1024-2048 instead of
    2048-512-2048).

    Args:
        pretrained (bool): load ImageNet-pretrained weights when True
        progress (bool): show a download progress bar on stderr when True
    """
    kwargs.update(width_per_group=64 * 2)
    return _resnet("wide_resnet101_2", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
null | emil-main/vis.py | import os
import sys
import torch
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
from emil import EMIL
from vis_utils import *
# Visualization script: loads a trained EMIL checkpoint and saves heatmap
# overlays (per-patch class predictions and attention weights) for one batch
# of MNIST test images.
os.environ["CUDA_VISIBLE_DEVICES"]="0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

img_size = 32
output_type = 'multiclass'
model_load_path = 'models/mnist_emil.pt'
patch_pred_map_path = 'imgs/patch_pred_map.png'      # output: per-patch class-probability overlay
patch_weight_map_path = 'imgs/patch_weight_map.png'  # output: attention-weight overlay
fmap_dims = (16, 16)  # backbone feature-map size for 32x32 input
patch_size = 1
patch_stride = 1
num_classes = 10
num_images = 64  # must equal the loader batch size (the flat indexing below relies on it)
# patches per axis, squared -> total patches per image
num_patches = int((fmap_dims[0] - patch_size) / patch_stride + 1) ** 2

# Must match the configuration the checkpoint was trained with (see main.py).
net = EMIL(
    output_type = output_type,
    num_inp_channels = 1,
    num_fmap_channels = 128,
    att_dim = 128,
    num_classes = num_classes,
    patch_size = patch_size,
    patch_stride = patch_stride,
    k_min = 100
).to(device)

#data prep
img_transform = transforms.Compose([
    transforms.Resize(img_size),
    transforms.ToTensor()
])
test_data = datasets.MNIST(
    root = 'data',
    train = False,
    transform = img_transform
)
test_loader = DataLoader(
    test_data,
    batch_size=num_images,
    shuffle=True,
    num_workers=1
)

checkpoint = torch.load(model_load_path)
net.load_state_dict(checkpoint['state_dict'], strict=False)
net.eval()
with torch.no_grad():
    # Grab a single (shuffled, hence random) batch, then leave the loop.
    for i, data in enumerate(test_loader):
        images, labels = data[0].to(device), data[1].to(device)
        # load random images
        if i == 0:
            break
    pred, x_local, x_weight = net(images, output_heatmaps=True)
    pred_class = torch.max(pred, 1)[1]
    # Repeat each image's predicted class once per patch, then gather, for
    # every patch, the probability of that image-level predicted class.
    idx = torch.repeat_interleave(pred_class, num_patches)
    x_local_pred_class = x_local.view(-1, num_classes)[torch.arange(num_images * num_patches), idx].view(num_images, num_patches)
    heatmap_local = get_heatmap(x_local_pred_class, fmap_dims, patch_size, patch_stride, img_size)
    heatmap_weight = get_heatmap(x_weight[:, :, 0], fmap_dims, patch_size, patch_stride, img_size)
    vis_multiple_imgs(images, heatmap_local, fpath=patch_pred_map_path)
    vis_multiple_imgs(images, heatmap_weight, fpath=patch_weight_map_path)
null | emil-main/vis_utils.py | import numpy as np
from matplotlib import pyplot as plt
import torch
from torch import nn
def get_heatmap(patch_scores, fmap_dims, patch_size, patch_stride, img_size):
    """
    Return a (b, 1, img_size, img_size) heatmap built from per-patch scores.

    Args:
        patch_scores: tensor of shape (b, k) -- patch prediction probabilities
            or attention weights, one score per patch, in row-major patch order.
        fmap_dims: (height, width) of the backbone feature map.
        patch_size: patch size in feature-map cells.
        patch_stride: patch stride in feature-map cells.
        img_size: side length of the (square) output heatmap.

    Each patch's score is accumulated over the feature-map cells it covers,
    overlapping contributions are averaged, and the map is bilinearly
    upsampled to img_size x img_size.
    """
    device = patch_scores.device
    # Patches per feature-map row (same formula as the pooling layer that made them).
    num_patches_y = int((fmap_dims[1] - patch_size) / patch_stride + 1)
    b, k = patch_scores.shape
    heatmap = torch.zeros((b, 1, fmap_dims[0], fmap_dims[1])).to(device)
    heatmap_norm = torch.zeros_like(heatmap)
    for i in range(b):
        for j in range(k):
            patch_score = patch_scores[i, j].squeeze()
            row_id = int(j // num_patches_y)
            col_id = int(j % num_patches_y)
            # Bugfix: convert the patch's grid index to feature-map coordinates
            # by scaling with the stride. The previous code used the grid index
            # directly, which is only correct when patch_stride == 1 and placed
            # patches at the wrong cells for any larger stride.
            top = row_id * patch_stride
            left = col_id * patch_stride
            heatmap[i, 0, top : top + patch_size, left : left + patch_size] += patch_score
            heatmap_norm[i, 0, top : top + patch_size, left : left + patch_size] += 1.0
    # Average overlapping contributions; the clamp avoids 0/0 -> NaN for cells
    # that no patch covers (possible when patch_stride > patch_size).
    heatmap = heatmap / torch.clamp(heatmap_norm, min = 1.0)
    heatmap = nn.functional.interpolate(heatmap, (img_size, img_size), mode = 'bilinear')
    return heatmap
def vis_multiple_imgs(images, heatmaps, fpath):
    """
    Saves multiple *images* with *heatmaps* overlays in *fpath*

    Images are arranged in a square grid (assumes the batch size is a perfect
    square; with b = 64 this gives an 8x8 grid -- extra images beyond
    num_rows**2 would be dropped by the subplot grid). Each cell shows the
    grayscale image with the heatmap alpha-blended on top (viridis, fixed
    [0, 1] color scale).
    """
    b = images.shape[0]
    num_rows = int(b**0.5)
    num_cols = num_rows
    fig = plt.figure(figsize=(8,8), dpi=600)
    for i, (img, hmap) in enumerate(zip(images, heatmaps)):
        img = img.cpu().numpy()
        # Min-max normalize each image to [0, 1] for display.
        img = (img - np.min(img)) / np.ptp(img)
        # CHW -> HWC, as expected by imshow.
        img = np.transpose(img, (1, 2, 0))
        hmap = hmap.cpu().numpy()
        hmap = np.transpose(hmap, (1, 2, 0))
        ax = fig.add_subplot(num_rows, num_cols, i + 1)
        # Image first, heatmap alpha-blended over it with a fixed color range
        # so cells are comparable across the grid.
        im_img = ax.imshow(img, alpha = 1., interpolation = 'bilinear', cmap = 'gray')
        im_hmap = ax.imshow(hmap, alpha = 0.5, interpolation = 'bilinear', cmap = 'viridis', vmin = 0., vmax = 1.)
        # Hide all ticks and labels.
        ax.tick_params(
            axis = 'both',
            which = 'both',
            bottom = False,
            top = False,
            left = False,
            labelbottom = False,
            labelleft = False
        )
    # Remove inter-subplot gaps before saving.
    plt.subplots_adjust(wspace = 0, hspace = 0)
    plt.savefig(fpath, bbox_inches = 'tight', pad_inches = 0, format = 'png', transparent = False)
evo | evo-master/README.md | # evo
***Python package for the evaluation of odometry and SLAM***
| Linux / macOS / Windows / ROS / ROS2 |
| :---: |
| [](https://dev.azure.com/michl2222/michl2222/_build/latest?definitionId=1&branchName=master) |
This package provides executables and a small library for handling, evaluating and comparing the trajectory output of odometry and SLAM algorithms.
Supported trajectory formats:
* 'TUM' trajectory files
* 'KITTI' pose files
* 'EuRoC MAV' (.csv groundtruth and TUM trajectory file)
* ROS and ROS2 bagfile with `geometry_msgs/PoseStamped`, `geometry_msgs/TransformStamped`, `geometry_msgs/PoseWithCovarianceStamped` or `nav_msgs/Odometry` topics or [TF messages](https://github.com/MichaelGrupp/evo/wiki/Formats#bag---ros-bagfile)
See [here](https://github.com/MichaelGrupp/evo/wiki/Formats) for more infos about the formats.
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/ape_demo_ORB_map.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/ape_demo_ORB_map.png" alt="evo" height="200" border="5" />
</a>
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_violin.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_violin.png" alt="evo" height="200" border="5" />
</a>
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/markers.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/markers.png" alt="evo" height="200" border="5" />
</a>
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_stats.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_stats.png" alt="evo" height="200" border="5" />
</a>
---
## Why?
evo has several advantages over other public benchmarking tools:
* common tools for different formats
* algorithmic options for association, alignment, scale adjustment for monocular SLAM etc.
* flexible options for output, [plotting](https://github.com/MichaelGrupp/evo/wiki/Plotting) or export (e.g. LaTeX plots or Excel tables)
* a powerful, configurable CLI that can cover many use cases
* modular `core` and `tools` libraries for custom extensions
* faster than other established Python-based tools ([see here](https://github.com/MichaelGrupp/evo/blob/master/doc/performance.md))
**What it's not:** a 1-to-1 re-implementation of a particular evaluation protocol tailored to a specific dataset.
---
## Installation / Upgrade
Installation is easy-peasy if you're familiar with this: https://xkcd.com/1987/
evo supports **Python 3.8+**. The last evo version that supports **Python 2.7** is `1.12.0`.
You might also want to use a [virtual environment](https://github.com/MichaelGrupp/evo/blob/master/doc/install_in_virtualenv.md).
### From PyPi
If you just want to use the executables of the latest release version, the easiest way is to run:
```bash
pip install evo --upgrade --no-binary evo
```
This will download the package and its dependencies from [PyPI](https://pypi.org/project/evo/) and install or upgrade them. Depending on your OS, you might be able to use `pip2` or `pip3` to specify the Python version you want. Tab completion for Bash terminals is supported via the [argcomplete](https://github.com/kislyuk/argcomplete/) package on most UNIX systems - open a new shell after the installation to use it (without `--no-binary evo` the tab completion might not be installed properly). If you want, you can subscribe to new releases via https://libraries.io/pypi/evo.
### From Source
Run this in the repository's base folder:
```bash
pip install --editable . --upgrade --no-binary evo
```
### Dependencies
**Python packages**
evo has some required dependencies that are ***automatically resolved*** during installation with pip.
They are specified in the `install_requires` part of the `setup.py` file.
**PyQt5 (optional)**
PyQt5 will give you the enhanced GUI for plot figures from the "*Qt5Agg*" matplotlib backend (otherwise: "*TkAgg*"). If PyQt5 is already installed when installing this package, it will be used as a default (see `evo_config show`). To change the plot backend afterwards, run `evo_config set plot_backend Qt5Agg`.
**ROS (optional)**
Some ROS-related features require a ROS installation, see [here](http://www.ros.org/). We are testing this package with ROS Noetic and Iron. Previous versions (`<= 1.12.0`) work with Melodic, Kinetic and Indigo.
---
## Command Line Interface
After installation with setup.py or from pip, the following executables can be called globally from your command-line:
**Metrics:**
* `evo_ape` - absolute pose error
* `evo_rpe` - relative pose error
**Tools:**
* `evo_traj` - tool for analyzing, plotting or exporting one or more trajectories
* `evo_res` - tool for comparing one or multiple result files from `evo_ape` or `evo_rpe`
* `evo_fig` - (experimental) tool for re-opening serialized plots (saved with `--serialize_plot`)
* `evo_config` - tool for global settings and config file manipulation
Call the commands with `--help` to see the options, e.g. `evo_ape --help`. Tab-completion of command line parameters is available on UNIX-like systems.
**More documentation**
Check out the [Wiki on GitHub](https://github.com/MichaelGrupp/evo/wiki).
---
## Example Workflow
There are some example trajectories in the source folder in `test/data`.
### 1.) Plot multiple trajectories
Here, we plot two KITTI pose files and the ground truth using `evo_traj`:
```
cd test/data
evo_traj kitti KITTI_00_ORB.txt KITTI_00_SPTAM.txt --ref=KITTI_00_gt.txt -p --plot_mode=xz
```
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/traj_demo.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/traj_demo.png" alt="evo" height="200" border="5" />
</a>
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/traj_demo_xyz.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/traj_demo_xyz.png" alt="evo" height="200" border="5" />
</a>
### 2.) Run a metric on trajectories
For example, here we calculate the absolute pose error for two trajectories from ORB-SLAM and S-PTAM using `evo_ape` (`KITTI_00_gt.txt` is the reference (ground truth)) and plot and save the individual results to .zip files for `evo_res`:
*First trajectory (ORB Stereo):*
```
mkdir results
evo_ape kitti KITTI_00_gt.txt KITTI_00_ORB.txt -va --plot --plot_mode xz --save_results results/ORB.zip
```
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/ape_demo_ORB_raw.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/ape_demo_ORB_raw.png" alt="evo" height="200" border="5" />
</a>
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/ape_demo_ORB_map.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/ape_demo_ORB_map.png" alt="evo" height="200" border="5" />
</a>
*Second trajectory (S-PTAM):*
```
evo_ape kitti KITTI_00_gt.txt KITTI_00_SPTAM.txt -va --plot --plot_mode xz --save_results results/SPTAM.zip
```
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/ape_demo_S-PTAM_raw.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/ape_demo_S-PTAM_raw.png" alt="evo" height="200" border="5" />
</a>
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/ape_demo_S-PTAM_map.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/ape_demo_S-PTAM_map.png" alt="evo" height="200" border="5" />
</a>
### 3.) Process multiple results from a metric
`evo_res` can be used to compare multiple result files from the metrics, i.e.:
* print infos and statistics (default)
* plot the results
* save the statistics in a table
Here, we use the results from above to generate a plot and a table:
```
evo_res results/*.zip -p --save_table results/table.csv
```
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_raw.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_raw.png" alt="evo" height="200" border="5" />
</a>
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_dist.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_dist.png" alt="evo" height="200" border="5" />
</a>
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_stats.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_stats.png" alt="evo" height="200" border="5" />
</a>
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_box.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_box.png" alt="evo" height="200" border="5" />
</a>
<a href="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_violin.png" target="_blank">
<img src="https://raw.githubusercontent.com/MichaelGrupp/evo/master/doc/assets/res_violin.png" alt="evo" height="200" border="5" />
</a>
---
## IPython / Jupyter Resources
For an interactive source code documentation, open the [Jupyter notebook](http://jupyter.readthedocs.io/en/latest/install.html) `metrics_tutorial.ipynb` in the `notebooks` folder of the repository. More infos on Jupyter notebooks: see [here](https://github.com/MichaelGrupp/evo/blob/master/doc/jupyter_notebook.md)
If you have IPython installed, you can launch an IPython shell with a custom evo profile with the command `evo_ipython`.
---
## Contributing Utilities
A few "inoffical" scripts for special use-cases are collected in the `contrib/` directory of the repository. They are inofficial in the sense that they don't ship with the package distribution and thus aren't regularly tested in continuous integration.
---
## Trouble
*":scream:, this piece of :shit: software doesn't do what I want!!1!1!!"*
**First aid:**
* append `-h`/ `--help` to your command
* check the [Wiki](https://github.com/MichaelGrupp/evo/wiki)
* check the [previous issues](https://github.com/MichaelGrupp/evo/issues?q=is%3Aissue+is%3Aclosed)
* open a [new issue](https://github.com/MichaelGrupp/evo/issues)
---
## Contributing
Patches are welcome, preferably as pull requests.
## License
[GPL-3.0 or later](https://www.gnu.org/licenses/gpl-3.0.html)
If you use this package for your research, a footnote with the link to this repository is appreciated: `github.com/MichaelGrupp/evo`.
...or, for citation with BibTeX:
```
@misc{grupp2017evo,
title={evo: Python package for the evaluation of odometry and SLAM.},
author={Grupp, Michael},
howpublished={\url{https://github.com/MichaelGrupp/evo}},
year={2017}
}
```
| 11,345 | 47.076271 | 580 | md |
evo | evo-master/_config.yml | theme: jekyll-theme-cayman | 26 | 26 | 26 | yml |
evo | evo-master/azure-pipelines.yml | # https://docs.microsoft.com/azure/devops/pipelines/languages/python
# https://docs.microsoft.com/en-us/azure/devops/pipelines/languages/docker
trigger:
- master
pr:
autoCancel: true
# PRs into ...
branches:
include:
- master
schedules:
- cron: "0 0 * * *"
displayName: 'daily build'
branches:
include:
- master
always: true
jobs:
- job: 'Docker'
strategy:
matrix:
ROS-noetic:
dockerfile: 'Dockerfile.rosnoetic'
imageName: 'ubuntu-latest'
ROS-iron:
dockerfile: 'Dockerfile.ros-iron'
imageName: 'ubuntu-latest'
pool:
vmImage: $(imageName)
steps:
# Currently only for testing, so no tagging needed.
- script: docker build . --file $(dockerfile) || exit 1
displayName: 'docker build'
- job: 'Test'
strategy:
matrix:
Python-3.8-ubuntu:
python.version: '3.8'
imageName: 'ubuntu-latest'
Python-3.11-ubuntu:
python.version: '3.11'
imageName: 'ubuntu-latest'
Python-3.11-mac:
python.version: '3.11'
imageName: 'macOS-latest'
Python-3.11-windows:
python.version: '3.11'
imageName: 'windows-latest'
maxParallel: 10
pool:
vmImage: $(imageName)
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '$(python.version)'
architecture: 'x64'
- script: |
python -m pip install --upgrade pip
pip install . --no-binary evo
evo_config show --brief --no_color
displayName: 'Install and configure package and dependencies'
- script: |
pip install mypy types-PyYAML
mypy --ignore-missing-imports evo/ test/ contrib/ doc/ --exclude test/tum_benchmark_tools/
displayName: 'mypy'
- script: |
pip install pytest
pytest -sv --junitxml=junit/test-results.xml
displayName: 'pytest'
- task: PublishTestResults@2
inputs:
testResultsFiles: '**/test-results.xml'
testRunTitle: 'Python $(python.version)'
condition: succeededOrFailed()
- job: 'Publish'
dependsOn: 'Test'
pool:
vmImage: 'ubuntu-latest'
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: '3.x'
architecture: 'x64'
- script: python setup.py sdist
displayName: 'Build sdist'
# Currently, that's it. No actual deployment.
| 2,315 | 21.057143 | 96 | yml |