Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
lightly | lightly-master/lightly/transforms/image_grid_transform.py | from typing import List, Tuple, Union
import torchvision.transforms as T
from PIL.Image import Image
from torch import Tensor
class ImageGridTransform:
    """Turns one image into several augmented views plus their location grids.

    Used for VICRegL.

    Attributes:
        transforms:
            A sequence of (image_grid_transform, view_transform) tuples. The
            image_grid_transform produces a (view, grid) pair from the input
            image; the view_transform then further augments the view. Each
            tuple is applied exactly once, yielding len(transforms) views
            and the same number of grids.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image: Union[Tensor, Image]) -> Union[List[Tensor], List[Image]]:
        """Transforms an image into multiple views and grids.

        Args:
            image:
                Image to be transformed.

        Returns:
            A single list containing all views followed by all grids. In the
            VICRegL implementation it has size:
            [
                [3, global_crop_size, global_crop_size],
                [3, local_crop_size, local_crop_size],
                [global_grid_size, global_grid_size, 2],
                [local_grid_size, local_grid_size, 2]
            ]
        """
        views = []
        grids = []
        for grid_transform, view_transform in self.transforms:
            raw_view, grid = grid_transform(image)
            views.append(view_transform(raw_view))
            grids.append(grid)
        # Views first, then grids, matching the original ordering.
        return views + grids
| 1,666 | 31.057692 | 88 | py |
lightly | lightly-master/lightly/transforms/jigsaw.py | # Copyright (c) 2021. Lightly AG and its affiliates.
# All Rights Reserved
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
class Jigsaw(object):
    """Implementation of Jigsaw image augmentation, inspired from PyContrast library.
    Generates n_grid**2 random crops and returns a list.
    This augmentation is instrumental to PIRL.
    Attributes:
        n_grid:
            Side length of the meshgrid, sqrt of the number of crops.
        img_size:
            Size of image.
        crop_size:
            Size of crops.
        transform:
            Transformation to apply on each crop.
    Examples:
        >>> from lightly.transforms import Jigsaw
        >>>
        >>> jigsaw_crop = Jigsaw(n_grid=3, img_size=255, crop_size=64, transform=transforms.ToTensor())
        >>>
        >>> # img is a PIL image
        >>> crops = jigsaw_crops(img)
    """
    def __init__(
        self, n_grid=3, img_size=255, crop_size=64, transform=transforms.ToTensor()
    ):
        self.n_grid = n_grid
        self.img_size = img_size
        self.crop_size = crop_size
        # Side length (in pixels) of one grid cell.
        self.grid_size = int(img_size / self.n_grid)
        # Maximum random offset of a crop inside its cell.
        # NOTE(review): becomes negative when crop_size > grid_size, which
        # makes np.random.randint below raise -- assumes crop_size <= grid_size.
        self.side = self.grid_size - self.crop_size
        self.transform = transform
        # Pixel coordinates of the top-left corner of each grid cell,
        # flattened to length n_grid * n_grid.
        yy, xx = np.meshgrid(np.arange(n_grid), np.arange(n_grid))
        self.yy = np.reshape(yy * self.grid_size, (n_grid * n_grid,))
        self.xx = np.reshape(xx * self.grid_size, (n_grid * n_grid,))
    def __call__(self, img):
        """Performs the Jigsaw augmentation
        Args:
            img:
                PIL image to perform Jigsaw augmentation on.
        Returns:
            Torch tensor with stacked crops.
        """
        # Independent random offset of every crop within its grid cell.
        r_x = np.random.randint(0, self.side + 1, self.n_grid * self.n_grid)
        r_y = np.random.randint(0, self.side + 1, self.n_grid * self.n_grid)
        img = np.asarray(img, np.uint8)
        crops = []
        for i in range(self.n_grid * self.n_grid):
            # Slice one crop out of its cell; assumes a channels-last
            # (H, W, C) uint8 array, as produced from a PIL RGB image.
            crops.append(
                img[
                    self.xx[i] + r_x[i] : self.xx[i] + r_x[i] + self.crop_size,
                    self.yy[i] + r_y[i] : self.yy[i] + r_y[i] + self.crop_size,
                    :,
                ]
            )
        crops = [Image.fromarray(crop) for crop in crops]
        crops = torch.stack([self.transform(crop) for crop in crops])
        # Shuffle the crop order to form the jigsaw "puzzle".
        crops = crops[np.random.permutation(self.n_grid**2)]
        return crops
| 2,448 | 31.653333 | 103 | py |
lightly | lightly-master/lightly/transforms/mae_transform.py | from typing import List, Tuple, Union
import torchvision.transforms as T
from PIL.Image import Image
from torch import Tensor
from lightly.transforms.multi_view_transform import MultiViewTransform
from lightly.transforms.utils import IMAGENET_NORMALIZE
class MAETransform:
    """Implements the view augmentation for MAE [0].
    Input to this transform:
        PIL Image or Tensor.
    Output of this transform:
        List of Tensor of length 1.
    Applies the following augmentations by default:
        - Random resized crop
        - Random horizontal flip
    - [0]: Masked Autoencoder, 2021, https://arxiv.org/abs/2111.06377
    Attributes:
        input_size:
            Size of the input image in pixels.
        min_scale:
            Minimum size of the randomized crop relative to the input_size.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
            A falsy value (e.g. None) disables normalization.
    """
    def __init__(
        self,
        input_size: Union[int, Tuple[int, int]] = 224,
        min_scale: float = 0.2,
        normalize: dict = IMAGENET_NORMALIZE,
    ):
        transforms = [
            T.RandomResizedCrop(
                input_size, scale=(min_scale, 1.0), interpolation=3
            ),  # 3 is bicubic
            T.RandomHorizontalFlip(),
            T.ToTensor(),
        ]
        # Normalization is optional: only appended when a dict is given.
        if normalize:
            transforms.append(T.Normalize(mean=normalize["mean"], std=normalize["std"]))
        self.transform = T.Compose(transforms)
    def __call__(self, image: Union[Tensor, Image]) -> List[Tensor]:
        """
        Applies the transforms to the input image.
        Args:
            image:
                The input image to apply the transforms to.
        Returns:
            A list containing the single transformed view (MAE uses one view).
        """
        return [self.transform(image)]
| 1,826 | 26.268657 | 88 | py |
lightly | lightly-master/lightly/transforms/moco_transform.py | from typing import Optional, Tuple, Union
from lightly.transforms.simclr_transform import SimCLRTransform
from lightly.transforms.utils import IMAGENET_NORMALIZE
class MoCoV1Transform(SimCLRTransform):
    """Implements the transformations for MoCo v1.
    Input to this transform:
        PIL Image or Tensor.
    Output of this transform:
        List of Tensor of length 2.
    Applies the following augmentations by default:
        - Random resized crop
        - Random horizontal flip
        - Color jitter
        - Random gray scale
        - ImageNet normalization
    Attributes:
        input_size:
            Size of the input image in pixels.
        cj_prob:
            Probability that color jitter is applied.
        cj_strength:
            Strength of the color jitter. `cj_bright`, `cj_contrast`, `cj_sat`, and
            `cj_hue` are multiplied by this value.
        cj_bright:
            How much to jitter brightness.
        cj_contrast:
            How much to jitter constrast.
        cj_sat:
            How much to jitter saturation.
        cj_hue:
            How much to jitter hue.
        min_scale:
            Minimum size of the randomized crop relative to the input_size.
        random_gray_scale:
            Probability of conversion to grayscale.
        gaussian_blur:
            Probability of Gaussian blur.
        kernel_size:
            Will be deprecated in favor of `sigmas` argument. If set, the old behavior applies and `sigmas` is ignored.
            Used to calculate sigma of gaussian blur with kernel_size * input_size.
        sigmas:
            Tuple of min and max value from which the std of the gaussian kernel is sampled.
            Is ignored if `kernel_size` is set.
        vf_prob:
            Probability that vertical flip is applied.
        hf_prob:
            Probability that horizontal flip is applied.
        rr_prob:
            Probability that random rotation is applied.
        rr_degrees:
            Range of degrees to select from for random rotation. If rr_degrees is None,
            images are rotated by 90 degrees. If rr_degrees is a (min, max) tuple,
            images are rotated by a random angle in [min, max]. If rr_degrees is a
            single number, images are rotated by a random angle in
            [-rr_degrees, +rr_degrees]. All rotations are counter-clockwise.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
    """
    def __init__(
        self,
        input_size: int = 224,
        cj_prob: float = 0.8,
        cj_strength: float = 1.0,
        cj_bright: float = 0.4,
        cj_contrast: float = 0.4,
        cj_sat: float = 0.4,
        cj_hue: float = 0.4,
        min_scale: float = 0.2,
        random_gray_scale: float = 0.2,
        gaussian_blur: float = 0.0,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.1, 2),
        vf_prob: float = 0.0,
        hf_prob: float = 0.5,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        normalize: dict = IMAGENET_NORMALIZE,
    ):
        # MoCo v1 only differs from SimCLR in its default augmentation
        # strengths (weaker color jitter of 0.4, blur disabled by default);
        # all the actual transform construction is delegated to the parent.
        super().__init__(
            input_size=input_size,
            cj_prob=cj_prob,
            cj_strength=cj_strength,
            cj_bright=cj_bright,
            cj_contrast=cj_contrast,
            cj_sat=cj_sat,
            cj_hue=cj_hue,
            min_scale=min_scale,
            random_gray_scale=random_gray_scale,
            gaussian_blur=gaussian_blur,
            kernel_size=kernel_size,
            sigmas=sigmas,
            vf_prob=vf_prob,
            hf_prob=hf_prob,
            rr_prob=rr_prob,
            rr_degrees=rr_degrees,
            normalize=normalize,
        )
class MoCoV2Transform(SimCLRTransform):
    """Implements the transformations for MoCo v2 [0].
    Identical to SimCLRTransform.
    Input to this transform:
        PIL Image or Tensor.
    Output of this transform:
        List of Tensor of length 2.
    Applies the following augmentations by default:
        - Random resized crop
        - Random horizontal flip
        - Color jitter
        - Random gray scale
        - Gaussian blur
        - ImageNet normalization
    - [0]: MoCo v2, 2020, https://arxiv.org/abs/2003.04297
    Attributes:
        input_size:
            Size of the input image in pixels.
        cj_prob:
            Probability that color jitter is applied.
        cj_strength:
            Strength of the color jitter. `cj_bright`, `cj_contrast`, `cj_sat`, and
            `cj_hue` are multiplied by this value. For datasets with small images,
            such as CIFAR, it is recommended to set `cj_strenght` to 0.5.
        cj_bright:
            How much to jitter brightness.
        cj_contrast:
            How much to jitter constrast.
        cj_sat:
            How much to jitter saturation.
        cj_hue:
            How much to jitter hue.
        min_scale:
            Minimum size of the randomized crop relative to the input_size.
        random_gray_scale:
            Probability of conversion to grayscale.
        gaussian_blur:
            Probability of Gaussian blur.
        kernel_size:
            Will be deprecated in favor of `sigmas` argument. If set, the old behavior applies and `sigmas` is ignored.
            Used to calculate sigma of gaussian blur with kernel_size * input_size.
        sigmas:
            Tuple of min and max value from which the std of the gaussian kernel is sampled.
            Is ignored if `kernel_size` is set.
        vf_prob:
            Probability that vertical flip is applied.
        hf_prob:
            Probability that horizontal flip is applied.
        rr_prob:
            Probability that random rotation is applied.
        rr_degrees:
            Range of degrees to select from for random rotation. If rr_degrees is None,
            images are rotated by 90 degrees. If rr_degrees is a (min, max) tuple,
            images are rotated by a random angle in [min, max]. If rr_degrees is a
            single number, images are rotated by a random angle in
            [-rr_degrees, +rr_degrees]. All rotations are counter-clockwise.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
    """
    # Intentionally empty: MoCo v2 uses SimCLR's augmentations and default
    # parameters unchanged; the subclass only exists for a descriptive name.
| 6,331 | 35.182857 | 119 | py |
lightly | lightly-master/lightly/transforms/msn_transform.py | from typing import Optional, Tuple, Union
import torchvision.transforms as T
from PIL.Image import Image
from torch import Tensor
from lightly.transforms.gaussian_blur import GaussianBlur
from lightly.transforms.multi_view_transform import MultiViewTransform
from lightly.transforms.utils import IMAGENET_NORMALIZE
class MSNTransform(MultiViewTransform):
    """Implements the transformations for MSN [0].
    Input to this transform:
        PIL Image or Tensor.
    Output of this transform:
        List of Tensor of length 2 * random_views + focal_views. (12 by default)
    Applies the following augmentations by default:
        - Random resized crop
        - Random horizontal flip
        - Color jitter
        - Random gray scale
        - Gaussian blur
        - ImageNet normalization
    Generates a set of random and focal views for each input image. The generated output
    is (views, target, filenames) where views is list with the following entries:
    [random_views_0, random_views_1, ..., focal_views_0, focal_views_1, ...].
    - [0]: Masked Siamese Networks, 2022: https://arxiv.org/abs/2204.07141
    Attributes:
        random_size:
            Size of the random image views in pixels.
        focal_size:
            Size of the focal image views in pixels.
        random_views:
            Number of random views to generate.
        focal_views:
            Number of focal views to generate.
        random_crop_scale:
            Minimum and maximum size of the randomized crops for the relative to random_size.
        focal_crop_scale:
            Minimum and maximum size of the randomized crops relative to focal_size.
        cj_prob:
            Probability that color jittering is applied.
        cj_strength:
            Strength of the color jitter. `cj_bright`, `cj_contrast`, `cj_sat`, and
            `cj_hue` are multiplied by this value.
        cj_bright:
            How much to jitter brightness.
        cj_contrast:
            How much to jitter constrast.
        cj_sat:
            How much to jitter saturation.
        cj_hue:
            How much to jitter hue.
        gaussian_blur:
            Probability of Gaussian blur.
        kernel_size:
            Will be deprecated in favor of `sigmas` argument. If set, the old behavior applies and `sigmas` is ignored.
            Used to calculate sigma of gaussian blur with kernel_size * input_size.
        sigmas:
            Tuple of min and max value from which the std of the gaussian kernel is sampled.
            Is ignored if `kernel_size` is set.
        random_gray_scale:
            Probability of conversion to grayscale.
        hf_prob:
            Probability that horizontal flip is applied.
        vf_prob:
            Probability that vertical flip is applied.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
    """
    def __init__(
        self,
        random_size: int = 224,
        focal_size: int = 96,
        random_views: int = 2,
        focal_views: int = 10,
        random_crop_scale: Tuple[float, float] = (0.3, 1.0),
        focal_crop_scale: Tuple[float, float] = (0.05, 0.3),
        cj_prob: float = 0.8,
        cj_strength: float = 1.0,
        cj_bright: float = 0.8,
        cj_contrast: float = 0.8,
        cj_sat: float = 0.8,
        cj_hue: float = 0.2,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.1, 2),
        random_gray_scale: float = 0.2,
        hf_prob: float = 0.5,
        vf_prob: float = 0.0,
        normalize: dict = IMAGENET_NORMALIZE,
    ):
        random_view_transform = MSNViewTransform(
            crop_size=random_size,
            crop_scale=random_crop_scale,
            cj_prob=cj_prob,
            cj_strength=cj_strength,
            cj_bright=cj_bright,
            cj_contrast=cj_contrast,
            cj_sat=cj_sat,
            cj_hue=cj_hue,
            gaussian_blur=gaussian_blur,
            kernel_size=kernel_size,
            sigmas=sigmas,
            random_gray_scale=random_gray_scale,
            hf_prob=hf_prob,
            vf_prob=vf_prob,
            normalize=normalize,
        )
        # Bug fix: previously the focal transform did not forward cj_bright,
        # cj_contrast, cj_sat, and cj_hue, so user-provided jitter components
        # were silently ignored for focal views (the MSNViewTransform defaults
        # were used instead). Forward them so both view types are configured
        # identically; behavior is unchanged when the defaults are used.
        focal_view_transform = MSNViewTransform(
            crop_size=focal_size,
            crop_scale=focal_crop_scale,
            cj_prob=cj_prob,
            cj_strength=cj_strength,
            cj_bright=cj_bright,
            cj_contrast=cj_contrast,
            cj_sat=cj_sat,
            cj_hue=cj_hue,
            gaussian_blur=gaussian_blur,
            kernel_size=kernel_size,
            sigmas=sigmas,
            random_gray_scale=random_gray_scale,
            hf_prob=hf_prob,
            vf_prob=vf_prob,
            normalize=normalize,
        )
        # Random views first, then focal views, matching the documented order.
        transforms = [random_view_transform] * random_views
        transforms += [focal_view_transform] * focal_views
        super().__init__(transforms=transforms)
class MSNViewTransform:
    """Creates a single augmented MSN view from an input image.

    Applies, in order: random resized crop, horizontal flip, vertical flip,
    color jitter, random grayscale, Gaussian blur, conversion to tensor, and
    normalization. Used by MSNTransform for both random and focal views.
    """
    def __init__(
        self,
        crop_size: int = 224,
        crop_scale: Tuple[float, float] = (0.3, 1.0),
        cj_prob: float = 0.8,
        cj_strength: float = 1.0,
        cj_bright: float = 0.8,
        cj_contrast: float = 0.8,
        cj_sat: float = 0.8,
        cj_hue: float = 0.2,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.1, 2),
        random_gray_scale: float = 0.2,
        hf_prob: float = 0.5,
        vf_prob: float = 0.0,
        normalize: dict = IMAGENET_NORMALIZE,
    ):
        # Each jitter component is scaled by the global cj_strength factor.
        color_jitter = T.ColorJitter(
            brightness=cj_strength * cj_bright,
            contrast=cj_strength * cj_contrast,
            saturation=cj_strength * cj_sat,
            hue=cj_strength * cj_hue,
        )
        transform = [
            T.RandomResizedCrop(size=crop_size, scale=crop_scale),
            T.RandomHorizontalFlip(p=hf_prob),
            T.RandomVerticalFlip(p=vf_prob),
            T.RandomApply([color_jitter], p=cj_prob),
            T.RandomGrayscale(p=random_gray_scale),
            GaussianBlur(kernel_size=kernel_size, sigmas=sigmas, prob=gaussian_blur),
            T.ToTensor(),
            T.Normalize(mean=normalize["mean"], std=normalize["std"]),
        ]
        self.transform = T.Compose(transform)
    def __call__(self, image: Union[Tensor, Image]) -> Tensor:
        """
        Applies the transforms to the input image.
        Args:
            image:
                The input image to apply the transforms to.
        Returns:
            The transformed image.
        """
        return self.transform(image)
| 6,537 | 33.962567 | 119 | py |
lightly | lightly-master/lightly/transforms/multi_crop_transform.py | from typing import Tuple
import torchvision.transforms as T
from lightly.transforms.multi_view_transform import MultiViewTransform
class MultiCropTranform(MultiViewTransform):
    """Implements the multi-crop transformations. Used by Swav.

    NOTE: the class name is misspelled ("Tranform") but is kept unchanged
    for backward compatibility with existing callers.

    Input to this transform:
        PIL Image or Tensor.
    Output of this transform:
        List of Tensor of length sum(crop_counts).
    Applies the following augmentations by default:
        - Random resized crop
        - transforms passed by constructor
    Attributes:
        crop_sizes:
            Size of the input image in pixels for each crop category.
        crop_counts:
            Number of crops for each crop category.
        crop_min_scales:
            Min scales for each crop category.
        crop_max_scales:
            Max_scales for each crop category.
        transforms:
            Transforms which are applied to all crops.
    Raises:
        ValueError: If crop_counts, crop_min_scales, or crop_max_scales do
            not have the same length as crop_sizes.
    """
    def __init__(
        self,
        crop_sizes: Tuple[int],
        crop_counts: Tuple[int],
        crop_min_scales: Tuple[float],
        crop_max_scales: Tuple[float],
        transforms,
    ):
        if len(crop_sizes) != len(crop_counts):
            raise ValueError(
                "Length of crop_sizes and crop_counts must be equal but are"
                f" {len(crop_sizes)} and {len(crop_counts)}."
            )
        if len(crop_sizes) != len(crop_min_scales):
            raise ValueError(
                "Length of crop_sizes and crop_min_scales must be equal but are"
                f" {len(crop_sizes)} and {len(crop_min_scales)}."
            )
        # Bug fix: this check previously compared against crop_min_scales a
        # second time, so a mismatched crop_max_scales was never caught.
        if len(crop_sizes) != len(crop_max_scales):
            raise ValueError(
                "Length of crop_sizes and crop_max_scales must be equal but are"
                f" {len(crop_sizes)} and {len(crop_max_scales)}."
            )
        crop_transforms = []
        # Build one RandomResizedCrop per crop category and replicate the
        # composed pipeline crop_count times.
        for crop_size, crop_count, min_scale, max_scale in zip(
            crop_sizes, crop_counts, crop_min_scales, crop_max_scales
        ):
            random_resized_crop = T.RandomResizedCrop(
                crop_size, scale=(min_scale, max_scale)
            )
            crop_transforms.extend(
                [
                    T.Compose(
                        [
                            random_resized_crop,
                            transforms,
                        ]
                    )
                ]
                * crop_count
            )
        super().__init__(crop_transforms)
| 2,408 | 30.285714 | 80 | py |
lightly | lightly-master/lightly/transforms/multi_view_transform.py | from typing import List, Union
from PIL.Image import Image
from torch import Tensor
class MultiViewTransform:
    """Produces several independently transformed views of one image.

    Args:
        transforms:
            A sequence of transforms; each one yields one view.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image: Union[Tensor, Image]) -> Union[List[Tensor], List[Image]]:
        """Applies every transform to the image and collects the results.

        Args:
            image:
                Image to be transformed into multiple views.

        Returns:
            One view per transform, in the order the transforms were given.
        """
        views = []
        for view_transform in self.transforms:
            views.append(view_transform(image))
        return views
| 775 | 22.515152 | 88 | py |
lightly | lightly-master/lightly/transforms/pirl_transform.py | from typing import Tuple, Union
import torchvision.transforms as T
from PIL.Image import Image
from torch import Tensor
from lightly.transforms.jigsaw import Jigsaw
from lightly.transforms.multi_view_transform import MultiViewTransform
from lightly.transforms.rotation import random_rotation_transform
from lightly.transforms.utils import IMAGENET_NORMALIZE
class PIRLTransform(MultiViewTransform):
    """Implements the transformations for PIRL [0]. The jigsaw augmentation
    is applied during the forward pass.
    Input to this transform:
        PIL Image or Tensor.
    Output of this transform:
        List of Tensor of length 2 (original, augmented).
    Applies the following augmentations by default:
        - Random resized crop
        - Random horizontal flip
        - Color jitter
        - Random gray scale
        - Jigsaw puzzle
    - [0] PIRL, 2019: https://arxiv.org/abs/1912.01991
    Attributes:
        input_size:
            Size of the input image in pixels.
        cj_prob:
            Probability that color jitter is applied.
        cj_strength:
            Strength of the color jitter. `cj_bright`, `cj_contrast`, `cj_sat`, and
            `cj_hue` are multiplied by this value.
        cj_bright:
            How much to jitter brightness.
        cj_contrast:
            How much to jitter constrast.
        cj_sat:
            How much to jitter saturation.
        cj_hue:
            How much to jitter hue.
        min_scale:
            Minimum size of the randomized crop relative to the input_size.
        random_gray_scale:
            Probability of conversion to grayscale.
        hf_prob:
            Probability that horizontal flip is applied.
        n_grid:
            Sqrt of the number of grids in the jigsaw image.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
            May be None to disable normalization.
    """
    def __init__(
        self,
        input_size: Union[int, Tuple[int, int]] = 64,
        cj_prob: float = 0.8,
        cj_strength: float = 1.0,
        cj_bright: float = 0.4,
        cj_contrast: float = 0.4,
        cj_sat: float = 0.4,
        cj_hue: float = 0.4,
        min_scale: float = 0.08,
        random_gray_scale: float = 0.2,
        hf_prob: float = 0.5,
        n_grid: int = 3,
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        # The jigsaw crop size is derived from the larger side for
        # non-square inputs.
        if isinstance(input_size, tuple):
            input_size_ = max(input_size)
        else:
            input_size_ = input_size
        # Cropping and normalisation for non-transformed image.
        # Bug fix: `normalize` is typed Union[None, dict], and the augmented
        # branch below guards against None -- but this branch previously
        # dereferenced normalize["mean"] unconditionally, raising a TypeError
        # for normalize=None. Guard it the same way.
        no_augment_transforms = [
            T.RandomResizedCrop(size=input_size, scale=(min_scale, 1.0)),
            T.ToTensor(),
        ]
        if normalize:
            no_augment_transforms.append(
                T.Normalize(mean=normalize["mean"], std=normalize["std"])
            )
        no_augment = T.Compose(no_augment_transforms)
        color_jitter = T.ColorJitter(
            brightness=cj_strength * cj_bright,
            contrast=cj_strength * cj_contrast,
            saturation=cj_strength * cj_sat,
            hue=cj_strength * cj_hue,
        )
        # Transform for transformed jigsaw image
        transform = [
            T.RandomHorizontalFlip(p=hf_prob),
            T.RandomApply([color_jitter], p=cj_prob),
            T.RandomGrayscale(p=random_gray_scale),
            T.ToTensor(),
        ]
        if normalize:
            transform += [T.Normalize(mean=normalize["mean"], std=normalize["std"])]
        jigsaw = Jigsaw(
            n_grid=n_grid,
            img_size=input_size_,
            crop_size=int(input_size_ // n_grid),
            transform=T.Compose(transform),
        )
        super().__init__([no_augment, jigsaw])
| 3,654 | 30.508621 | 84 | py |
lightly | lightly-master/lightly/transforms/random_crop_and_flip_with_grid.py | from dataclasses import dataclass
from typing import Dict, List, Tuple
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from PIL import Image
from torch import nn
@dataclass
class Location:
    """Parameters of a random crop/flip, in original-image coordinates.

    Records where a view was cut out of the source image and whether it was
    flipped, so callers can map the view back onto the original image.
    """

    # The row index of the top-left corner of the crop.
    top: float
    # The column index of the top-left corner of the crop.
    left: float
    # The height of the crop.
    height: float
    # The width of the crop.
    width: float
    # The height of the original image.
    image_height: float
    # The width of the original image.
    image_width: float
    # Whether to flip the image horizontally.
    horizontal_flip: bool = False
    # Whether to flip the image vertically.
    vertical_flip: bool = False
class RandomResizedCropWithLocation(T.RandomResizedCrop):
    """
    Do a random resized crop and return both the resulting image and the location. See base class.
    """
    def forward(self, img: Image.Image) -> Tuple[Image.Image, Location]:
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.
        Returns:
            PIL Image or Tensor: Randomly cropped image
            Location: Location object containing crop parameters
        """
        # Sample the crop box using the scale/ratio ranges configured on the
        # torchvision base class.
        top, left, height, width = self.get_params(img, self.scale, self.ratio)
        image_width, image_height = T.functional.get_image_size(img)
        # Record the crop parameters in original-image coordinates so the
        # view can be mapped back to the source image later.
        location = Location(
            top=top,
            left=left,
            height=height,
            width=width,
            image_height=image_height,
            image_width=image_width,
        )
        img = T.functional.resized_crop(
            img, top, left, height, width, self.size, self.interpolation
        )
        return img, location
class RandomHorizontalFlipWithLocation(T.RandomHorizontalFlip):
    """See base class."""
    def forward(
        self, img: Image.Image, location: Location
    ) -> Tuple[Image.Image, Location]:
        """Horizontal flip image.
        Horizontally flip the given image randomly with a given probability and
        return both the resulting image and the location.
        Note: the passed Location object is mutated in place (its
        horizontal_flip flag is set) and also returned.
        Args:
            img (PIL Image or Tensor): Image to be flipped..
            Location: Location object linked to the image
        Returns:
            PIL Image or Tensor: Randomly flipped image
            Location: Location object with updated location.horizontal_flip parameter
        """
        # Flip with probability self.p (set by the base class constructor).
        if torch.rand(1) < self.p:
            img = F.hflip(img)
            location.horizontal_flip = True
        return img, location
class RandomVerticalFlipWithLocation(T.RandomVerticalFlip):
    """See base class."""
    def forward(
        self, img: Image.Image, location: Location
    ) -> Tuple[Image.Image, Location]:
        """Vertical flip image.
        Vertically flip the given image randomly with a given probability and
        return both the resulting image and the location.
        Note: the passed Location object is mutated in place (its
        vertical_flip flag is set) and also returned.
        Args:
            img (PIL Image or Tensor): Image to be flipped..
            Location: Location object linked to the image
        Returns:
            PIL Image or Tensor: Randomly flipped image
            Location: Location object with updated location.vertical_flip parameter
        """
        # Flip with probability self.p (set by the base class constructor).
        if torch.rand(1) < self.p:
            img = F.vflip(img)
            location.vertical_flip = True
        return img, location
class RandomResizedCropAndFlip(nn.Module):
    """Randomly flip and crop an image.
    A PyTorch module that applies random cropping, horizontal and vertical flipping to an image,
    and returns the transformed image and a grid tensor used to map the image back to the
    original image space in an NxN grid.
    Args:
        grid_size:
            The number of grid cells in the output grid tensor.
        crop_size:
            The size (in pixels) of the random crops.
        crop_min_scale:
            The minimum scale factor for random resized crops.
        crop_max_scale:
            The maximum scale factor for random resized crops.
        hf_prob:
            The probability of applying horizontal flipping to the image.
        vf_prob:
            The probability of applying vertical flipping to the image.
    """
    def __init__(
        self,
        grid_size: int = 7,
        crop_size: int = 224,
        crop_min_scale: float = 0.05,
        crop_max_scale: float = 0.2,
        hf_prob: float = 0.5,
        vf_prob: float = 0.5,
    ):
        super().__init__()
        self.grid_size = grid_size
        self.crop_size = crop_size
        self.crop_min_scale = crop_min_scale
        self.crop_max_scale = crop_max_scale
        self.hf_prob = hf_prob
        self.vf_prob = vf_prob
        # Location-aware variants of the torchvision transforms, so the crop
        # and flip parameters can be recorded and turned into a grid.
        self.resized_crop = RandomResizedCropWithLocation(
            size=self.crop_size, scale=(self.crop_min_scale, self.crop_max_scale)
        )
        self.horizontal_flip = RandomHorizontalFlipWithLocation(self.hf_prob)
        self.vertical_flip = RandomVerticalFlipWithLocation(self.vf_prob)
    def forward(self, img: Image.Image) -> Tuple[Image.Image, torch.Tensor]:
        """Applies random cropping and horizontal flipping to an image, and returns the
        transformed image and a grid tensor used to map the image back to the original image
        space in an NxN grid.
        Args:
            img: The input PIL image.
        Returns:
            A tuple containing the transformed PIL image and the grid tensor.
        """
        # Each step updates the shared Location object in place.
        img, location = self.resized_crop.forward(img=img)
        img, location = self.horizontal_flip.forward(img, location)
        img, location = self.vertical_flip.forward(img, location)
        grid = self.location_to_NxN_grid(location=location)
        return img, grid
    def location_to_NxN_grid(self, location: Location) -> torch.Tensor:
        """Create grid from location object.
        Create a grid tensor with grid_size rows and grid_size columns, where each cell represents a region of
        the original image. The grid is used to map the cropped and transformed image back to the
        original image space.
        Args:
            location: An instance of the Location class, containing the location and size of the
                transformed image in the original image space.
        Returns:
            A grid tensor of shape (grid_size, grid_size, 2), where the last dimension represents the (x, y) coordinate
            of the center of each cell in the original image space.
        """
        cell_width = location.width / self.grid_size
        cell_height = location.height / self.grid_size
        # Cell-center coordinates in original-image space. NOTE(review):
        # torch.linspace spans [start, start + extent] inclusively, so the
        # half-cell offset places the last center slightly past the crop --
        # presumably intentional approximation; confirm against VICRegL.
        x = torch.linspace(
            location.left, location.left + location.width, self.grid_size
        ) + (cell_width / 2)
        y = torch.linspace(
            location.top, location.top + location.height, self.grid_size
        ) + (cell_height / 2)
        # Flipping the view reverses the order of the coordinates so the grid
        # still matches the (flipped) view pixel layout.
        if location.horizontal_flip:
            x = torch.flip(x, dims=[0])
        if location.vertical_flip:
            y = torch.flip(y, dims=[0])
        grid_x, grid_y = torch.meshgrid(x, y, indexing="xy")
        return torch.stack([grid_x, grid_y], dim=-1)
| 7,147 | 33.868293 | 119 | py |
lightly | lightly-master/lightly/transforms/rotation.py | # Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from typing import Tuple, Union
import numpy as np
import torchvision.transforms as T
from PIL.Image import Image
from torch import Tensor
from torchvision.transforms import functional as TF
class RandomRotate:
    """Rotates an image by a fixed angle with a given probability.

    By default the image is rotated by 90 degrees half of the time. This
    augmentation is useful for rotation-invariant data such as medical
    imaging or satellite imagery.

    Attributes:
        prob:
            Probability with which the rotation is applied.
        angle:
            Rotation angle in degrees. Multiples of 90 are recommended since
            they rotate the tensor without introducing rasterization
            artifacts.
    """

    def __init__(self, prob: float = 0.5, angle: int = 90):
        self.prob = prob
        self.angle = angle

    def __call__(self, image: Union[Image, Tensor]) -> Union[Image, Tensor]:
        """Applies the rotation with probability `prob`.

        Args:
            image:
                PIL image or tensor to potentially rotate.

        Returns:
            The rotated image, or the unchanged input.
        """
        draw = np.random.random_sample()
        if draw >= self.prob:
            # No rotation this time; pass the image through untouched.
            return image
        return TF.rotate(image, self.angle)
class RandomRotateDegrees:
    """Random rotate image between two rotation angles with a random probability.
    Attributes:
        prob:
            Probability with which image is rotated.
        degrees:
            Range of degrees to select from. If degrees is a number instead of a sequence like (min, max),
            the range of degrees will be (-degrees, +degrees). The image is rotated counter-clockwise with
            a random angle in the (min, max) range or in the (-degrees, +degrees) range.
    """
    def __init__(self, prob: float, degrees: Union[float, Tuple[float, float]]):
        # RandomApply handles the probability; RandomRotation samples the angle.
        self.transform = T.RandomApply([T.RandomRotation(degrees=degrees)], p=prob)
    def __call__(self, image: Union[Image, Tensor]) -> Union[Image, Tensor]:
        """Rotates the images with a given probability.
        Args:
            image:
                PIL image or tensor which will be rotated.
        Returns:
            Rotated image or original image.
        """
        return self.transform(image)
def random_rotation_transform(
    rr_prob: float,
    rr_degrees: Union[None, float, Tuple[float, float]],
) -> Union[RandomRotate, T.RandomApply]:
    """Builds a random-rotation transform.

    Args:
        rr_prob:
            Probability with which a rotation is applied.
        rr_degrees:
            None for fixed 90 degree rotations, otherwise the angle (range)
            forwarded to RandomRotateDegrees.

    Returns:
        A RandomRotate when rr_degrees is None, else a RandomRotateDegrees.
    """
    if rr_degrees is not None:
        # Rotate by a random angle sampled from rr_degrees.
        return RandomRotateDegrees(prob=rr_prob, degrees=rr_degrees)
    # Default: rotate by exactly 90 degrees with probability rr_prob.
    return RandomRotate(prob=rr_prob, angle=90)
| 2,951 | 30.404255 | 106 | py |
lightly | lightly-master/lightly/transforms/simclr_transform.py | from typing import Optional, Tuple, Union
import torchvision.transforms as T
from PIL.Image import Image
from torch import Tensor
from lightly.transforms.gaussian_blur import GaussianBlur
from lightly.transforms.multi_view_transform import MultiViewTransform
from lightly.transforms.rotation import random_rotation_transform
from lightly.transforms.utils import IMAGENET_NORMALIZE
class SimCLRTransform(MultiViewTransform):
"""Implements the transformations for SimCLR [0, 1].
Input to this transform:
PIL Image or Tensor.
Output of this transform:
List of Tensor of length 2.
Applies the following augmentations by default:
- Random resized crop
- Random horizontal flip
- Color jitter
- Random gray scale
- Gaussian blur
- ImageNet normalization
Note that SimCLR v1 and v2 use the same data augmentations.
- [0]: SimCLR v1, 2020, https://arxiv.org/abs/2002.05709
- [1]: SimCLR v2, 2020, https://arxiv.org/abs/2006.10029
Input to this transform:
PIL Image or Tensor.
Output of this transform:
List of [tensor, tensor].
Attributes:
input_size:
Size of the input image in pixels.
cj_prob:
Probability that color jitter is applied.
cj_strength:
Strength of the color jitter. `cj_bright`, `cj_contrast`, `cj_sat`, and
`cj_hue` are multiplied by this value. For datasets with small images,
such as CIFAR, it is recommended to set `cj_strenght` to 0.5.
cj_bright:
How much to jitter brightness.
cj_contrast:
How much to jitter constrast.
cj_sat:
How much to jitter saturation.
cj_hue:
How much to jitter hue.
min_scale:
Minimum size of the randomized crop relative to the input_size.
random_gray_scale:
Probability of conversion to grayscale.
gaussian_blur:
Probability of Gaussian blur.
kernel_size:
Will be deprecated in favor of `sigmas` argument. If set, the old behavior applies and `sigmas` is ignored.
Used to calculate sigma of gaussian blur with kernel_size * input_size.
sigmas:
Tuple of min and max value from which the std of the gaussian kernel is sampled.
Is ignored if `kernel_size` is set.
vf_prob:
Probability that vertical flip is applied.
hf_prob:
Probability that horizontal flip is applied.
rr_prob:
Probability that random rotation is applied.
rr_degrees:
Range of degrees to select from for random rotation. If rr_degrees is None,
images are rotated by 90 degrees. If rr_degrees is a (min, max) tuple,
images are rotated by a random angle in [min, max]. If rr_degrees is a
single number, images are rotated by a random angle in
[-rr_degrees, +rr_degrees]. All rotations are counter-clockwise.
normalize:
Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
"""
def __init__(
    self,
    input_size: int = 224,
    cj_prob: float = 0.8,
    cj_strength: float = 1.0,
    cj_bright: float = 0.8,
    cj_contrast: float = 0.8,
    cj_sat: float = 0.8,
    cj_hue: float = 0.2,
    min_scale: float = 0.08,
    random_gray_scale: float = 0.2,
    gaussian_blur: float = 0.5,
    kernel_size: Optional[float] = None,
    sigmas: Tuple[float, float] = (0.1, 2),
    vf_prob: float = 0.0,
    hf_prob: float = 0.5,
    rr_prob: float = 0.0,
    rr_degrees: Union[None, float, Tuple[float, float]] = None,
    normalize: Union[None, dict] = IMAGENET_NORMALIZE,
):
    # SimCLR draws both views from a single augmentation distribution, so
    # one shared view transform is built and registered twice.
    shared_view = SimCLRViewTransform(
        input_size=input_size,
        cj_prob=cj_prob,
        cj_strength=cj_strength,
        cj_bright=cj_bright,
        cj_contrast=cj_contrast,
        cj_sat=cj_sat,
        cj_hue=cj_hue,
        min_scale=min_scale,
        random_gray_scale=random_gray_scale,
        gaussian_blur=gaussian_blur,
        kernel_size=kernel_size,
        sigmas=sigmas,
        vf_prob=vf_prob,
        hf_prob=hf_prob,
        rr_prob=rr_prob,
        rr_degrees=rr_degrees,
        normalize=normalize,
    )
    # Two list entries -> two augmented views per input image.
    super().__init__(transforms=[shared_view] * 2)
class SimCLRViewTransform:
    """Single-view augmentation pipeline used by the SimCLR transform.

    Applies, in order: random resized crop, optional random rotation,
    horizontal/vertical flips, color jitter, random grayscale, Gaussian
    blur, conversion to tensor, and (optionally) normalization.
    """

    def __init__(
        self,
        input_size: int = 224,
        cj_prob: float = 0.8,
        cj_strength: float = 1.0,
        cj_bright: float = 0.8,
        cj_contrast: float = 0.8,
        cj_sat: float = 0.8,
        cj_hue: float = 0.2,
        min_scale: float = 0.08,
        random_gray_scale: float = 0.2,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.1, 2),
        vf_prob: float = 0.0,
        hf_prob: float = 0.5,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        # The four jitter magnitudes are scaled by the shared cj_strength.
        jitter = T.ColorJitter(
            brightness=cj_strength * cj_bright,
            contrast=cj_strength * cj_contrast,
            saturation=cj_strength * cj_sat,
            hue=cj_strength * cj_hue,
        )
        pipeline = [
            T.RandomResizedCrop(size=input_size, scale=(min_scale, 1.0)),
            random_rotation_transform(rr_prob=rr_prob, rr_degrees=rr_degrees),
            T.RandomHorizontalFlip(p=hf_prob),
            T.RandomVerticalFlip(p=vf_prob),
            T.RandomApply([jitter], p=cj_prob),
            T.RandomGrayscale(p=random_gray_scale),
            GaussianBlur(kernel_size=kernel_size, sigmas=sigmas, prob=gaussian_blur),
            T.ToTensor(),
        ]
        if normalize:
            pipeline.append(T.Normalize(mean=normalize["mean"], std=normalize["std"]))
        self.transform = T.Compose(pipeline)

    def __call__(self, image: Union[Tensor, Image]) -> Tensor:
        """Applies the augmentation pipeline to the input image.

        Args:
            image:
                The input image to apply the transforms to.

        Returns:
            The transformed image as a tensor.
        """
        return self.transform(image)
| 6,447 | 34.043478 | 119 | py |
lightly | lightly-master/lightly/transforms/simsiam_transform.py | from typing import Optional, Tuple, Union
import torchvision.transforms as T
from PIL.Image import Image
from torch import Tensor
from lightly.transforms.gaussian_blur import GaussianBlur
from lightly.transforms.multi_view_transform import MultiViewTransform
from lightly.transforms.rotation import random_rotation_transform
from lightly.transforms.utils import IMAGENET_NORMALIZE
class SimSiamTransform(MultiViewTransform):
    """Augmentation pipeline for SimSiam.

    Input to this transform:
        PIL Image or Tensor.

    Output of this transform:
        List of 2 tensors, both produced by the same view transform.

    Default augmentations: random resized crop, random horizontal flip,
    color jitter, random grayscale, Gaussian blur, ImageNet normalization.

    Attributes:
        input_size: Output image size in pixels.
        cj_prob: Probability of applying color jitter.
        cj_strength: Multiplier for `cj_bright`, `cj_contrast`, `cj_sat`
            and `cj_hue`. For small-image datasets such as CIFAR, 0.5 is
            recommended.
        cj_bright: Brightness jitter magnitude.
        cj_contrast: Contrast jitter magnitude.
        cj_sat: Saturation jitter magnitude.
        cj_hue: Hue jitter magnitude.
        min_scale: Minimum crop scale relative to the full image.
        random_gray_scale: Probability of conversion to grayscale.
        gaussian_blur: Probability of Gaussian blur.
        kernel_size: Will be deprecated in favor of `sigmas`; if set, the
            blur sigma is derived from kernel_size * input_size and
            `sigmas` is ignored.
        sigmas: (min, max) range from which the blur sigma is sampled.
            Ignored if `kernel_size` is set.
        vf_prob: Probability of vertical flip.
        hf_prob: Probability of horizontal flip.
        rr_prob: Probability of random rotation.
        rr_degrees: Rotation range. None rotates by 90 degrees; a single
            number means [-rr_degrees, +rr_degrees]; a (min, max) tuple
            means a random angle in [min, max]. Counter-clockwise.
        normalize: Dict with 'mean' and 'std' for
            torchvision.transforms.Normalize, or None to skip.
    """

    def __init__(
        self,
        input_size: int = 224,
        cj_prob: float = 0.8,
        cj_strength: float = 1.0,
        cj_bright: float = 0.4,
        cj_contrast: float = 0.4,
        cj_sat: float = 0.4,
        cj_hue: float = 0.1,
        min_scale: float = 0.2,
        random_gray_scale: float = 0.2,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.1, 2),
        vf_prob: float = 0.0,
        hf_prob: float = 0.5,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        # Both views share one augmentation pipeline, applied twice.
        shared_view = SimSiamViewTransform(
            input_size=input_size,
            cj_prob=cj_prob,
            cj_strength=cj_strength,
            cj_bright=cj_bright,
            cj_contrast=cj_contrast,
            cj_sat=cj_sat,
            cj_hue=cj_hue,
            min_scale=min_scale,
            random_gray_scale=random_gray_scale,
            gaussian_blur=gaussian_blur,
            kernel_size=kernel_size,
            sigmas=sigmas,
            vf_prob=vf_prob,
            hf_prob=hf_prob,
            rr_prob=rr_prob,
            rr_degrees=rr_degrees,
            normalize=normalize,
        )
        super().__init__(transforms=[shared_view] * 2)
class SimSiamViewTransform:
    """Single-view augmentation pipeline used by the SimSiam transform.

    Applies, in order: random resized crop, optional random rotation,
    horizontal/vertical flips, color jitter, random grayscale, Gaussian
    blur, conversion to tensor, and (optionally) normalization.
    """

    def __init__(
        self,
        input_size: int = 224,
        cj_prob: float = 0.8,
        cj_strength: float = 1.0,
        cj_bright: float = 0.4,
        cj_contrast: float = 0.4,
        cj_sat: float = 0.4,
        cj_hue: float = 0.1,
        min_scale: float = 0.2,
        random_gray_scale: float = 0.2,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.1, 2),
        vf_prob: float = 0.0,
        hf_prob: float = 0.5,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        # The four jitter magnitudes are scaled by the shared cj_strength.
        jitter = T.ColorJitter(
            brightness=cj_strength * cj_bright,
            contrast=cj_strength * cj_contrast,
            saturation=cj_strength * cj_sat,
            hue=cj_strength * cj_hue,
        )
        pipeline = [
            T.RandomResizedCrop(size=input_size, scale=(min_scale, 1.0)),
            random_rotation_transform(rr_prob=rr_prob, rr_degrees=rr_degrees),
            T.RandomHorizontalFlip(p=hf_prob),
            T.RandomVerticalFlip(p=vf_prob),
            T.RandomApply([jitter], p=cj_prob),
            T.RandomGrayscale(p=random_gray_scale),
            GaussianBlur(kernel_size=kernel_size, sigmas=sigmas, prob=gaussian_blur),
            T.ToTensor(),
        ]
        if normalize:
            pipeline.append(T.Normalize(mean=normalize["mean"], std=normalize["std"]))
        self.transform = T.Compose(pipeline)

    def __call__(self, image: Union[Tensor, Image]) -> Tensor:
        """Applies the augmentation pipeline to the input image.

        Args:
            image:
                The input image to apply the transforms to.

        Returns:
            The transformed image as a tensor.
        """
        return self.transform(image)
| 6,130 | 34.439306 | 119 | py |
lightly | lightly-master/lightly/transforms/smog_transform.py | from typing import Optional, Tuple, Union
import torchvision.transforms as T
from PIL.Image import Image
from torch import Tensor
from lightly.transforms.gaussian_blur import GaussianBlur
from lightly.transforms.multi_view_transform import MultiViewTransform
from lightly.transforms.solarize import RandomSolarization
from lightly.transforms.utils import IMAGENET_NORMALIZE
class SMoGTransform(MultiViewTransform):
    """Multi-crop augmentation pipeline for SMoG.

    Input to this transform:
        PIL Image or Tensor.

    Output of this transform:
        List of sum(crop_counts) tensors (8 by default).

    Default augmentations per crop: random resized crop, random horizontal
    flip, color jitter, random grayscale, Gaussian blur, random
    solarization, ImageNet normalization.

    Attributes:
        crop_sizes: Output size in pixels for each crop category.
        crop_counts: Number of crops generated for each crop category.
        crop_min_scales: Minimum crop scale for each crop category.
        crop_max_scales: Maximum crop scale for each crop category.
        gaussian_blur_probs: Gaussian blur probability per crop category.
        gaussian_blur_kernel_sizes: Deprecated in favour of
            `gaussian_blur_sigmas`.
        gaussian_blur_sigmas: (min, max) range from which the blur sigma
            is sampled.
        solarize_probs: Solarization probability per crop category.
        hf_prob: Probability of horizontal flip.
        cj_prob: Probability of applying color jitter.
        cj_strength: Multiplier for `cj_bright`, `cj_contrast`, `cj_sat`
            and `cj_hue`.
        cj_bright: Brightness jitter magnitude.
        cj_contrast: Contrast jitter magnitude.
        cj_sat: Saturation jitter magnitude.
        cj_hue: Hue jitter magnitude.
        random_gray_scale: Probability of conversion to grayscale.
        normalize: Dict with 'mean' and 'std' for
            torchvision.transforms.Normalize, or None to skip.
    """

    def __init__(
        self,
        crop_sizes: Tuple[int, int] = (224, 96),
        crop_counts: Tuple[int, int] = (4, 4),
        crop_min_scales: Tuple[float, float] = (0.2, 0.05),
        crop_max_scales: Tuple[float, float] = (1.0, 0.2),
        gaussian_blur_probs: Tuple[float, float] = (0.5, 0.1),
        gaussian_blur_kernel_sizes: Tuple[Optional[float], Optional[float]] = (
            None,
            None,
        ),
        gaussian_blur_sigmas: Tuple[float, float] = (0.1, 2),
        solarize_probs: Tuple[float, float] = (0.0, 0.2),
        hf_prob: float = 0.5,
        cj_prob: float = 1.0,
        cj_strength: float = 0.5,
        cj_bright: float = 0.8,
        cj_contrast: float = 0.8,
        cj_sat: float = 0.4,
        cj_hue: float = 0.2,
        random_gray_scale: float = 0.2,
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        # Build one view transform per crop category and repeat it
        # crop_counts[idx] times so each call yields sum(crop_counts) views.
        transforms = []
        for idx, crop_size in enumerate(crop_sizes):
            view = SmoGViewTransform(
                crop_size=crop_size,
                crop_min_scale=crop_min_scales[idx],
                crop_max_scale=crop_max_scales[idx],
                gaussian_blur_prob=gaussian_blur_probs[idx],
                kernel_size=gaussian_blur_kernel_sizes[idx],
                sigmas=gaussian_blur_sigmas,
                solarize_prob=solarize_probs[idx],
                hf_prob=hf_prob,
                cj_prob=cj_prob,
                cj_strength=cj_strength,
                cj_bright=cj_bright,
                cj_contrast=cj_contrast,
                cj_sat=cj_sat,
                cj_hue=cj_hue,
                random_gray_scale=random_gray_scale,
                normalize=normalize,
            )
            transforms += [view] * crop_counts[idx]
        super().__init__(transforms)
class SmoGViewTransform:
    """Single-view augmentation pipeline used by the SMoG transform.

    Applies, in order: random resized crop, horizontal flip, color jitter,
    random grayscale, Gaussian blur, random solarization, conversion to
    tensor, and (optionally) normalization.
    """

    def __init__(
        self,
        crop_size: int = 224,
        crop_min_scale: float = 0.2,
        crop_max_scale: float = 1.0,
        gaussian_blur_prob: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.1, 2),
        solarize_prob: float = 0.0,
        hf_prob: float = 0.5,
        cj_prob: float = 1.0,
        cj_strength: float = 0.5,
        cj_bright: float = 0.8,
        cj_contrast: float = 0.8,
        cj_sat: float = 0.4,
        cj_hue: float = 0.2,
        random_gray_scale: float = 0.2,
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        # The four jitter magnitudes are scaled by the shared cj_strength.
        jitter = T.ColorJitter(
            brightness=cj_strength * cj_bright,
            contrast=cj_strength * cj_contrast,
            saturation=cj_strength * cj_sat,
            hue=cj_strength * cj_hue,
        )
        pipeline = [
            T.RandomResizedCrop(crop_size, scale=(crop_min_scale, crop_max_scale)),
            T.RandomHorizontalFlip(p=hf_prob),
            T.RandomApply([jitter], p=cj_prob),
            T.RandomGrayscale(p=random_gray_scale),
            GaussianBlur(
                kernel_size=kernel_size,
                prob=gaussian_blur_prob,
                sigmas=sigmas,
            ),
            RandomSolarization(prob=solarize_prob),
            T.ToTensor(),
        ]
        if normalize:
            pipeline.append(T.Normalize(mean=normalize["mean"], std=normalize["std"]))
        self.transform = T.Compose(pipeline)

    def __call__(self, image: Union[Tensor, Image]) -> Tensor:
        """Applies the augmentation pipeline to the input image.

        Args:
            image:
                The input image to apply the transforms to.

        Returns:
            The transformed image as a tensor.
        """
        return self.transform(image)
| 6,163 | 33.435754 | 92 | py |
lightly | lightly-master/lightly/transforms/solarize.py | # Copyright (c) 2021. Lightly AG and its affiliates.
# All Rights Reserved
import numpy as np
from PIL import ImageOps
class RandomSolarization(object):
    """Randomly solarizes an image with a given probability.

    Uses Pillow's ``ImageOps.solarize``, which inverts all pixel values
    above ``threshold``.

    Attributes:
        prob:
            Probability of applying the transformation.
        threshold:
            Pixel value above which values are inverted (default: 128).
    """

    def __init__(self, prob: float = 0.5, threshold: int = 128):
        self.prob = prob
        self.threshold = threshold

    def __call__(self, sample):
        """Solarizes the given input image with probability `prob`.

        Args:
            sample:
                PIL image to which solarize will be applied.

        Returns:
            Solarized image, or the unchanged input image.
        """
        # Draw one uniform sample in [0, 1) to decide whether to solarize.
        if np.random.random_sample() < self.prob:
            return ImageOps.solarize(sample, threshold=self.threshold)
        return sample
| 1,118 | 25.642857 | 80 | py |
lightly | lightly-master/lightly/transforms/swav_transform.py | from typing import Optional, Tuple, Union
import torchvision.transforms as T
from PIL.Image import Image
from torch import Tensor
from lightly.transforms.gaussian_blur import GaussianBlur
from lightly.transforms.multi_crop_transform import MultiCropTranform
from lightly.transforms.rotation import random_rotation_transform
from lightly.transforms.utils import IMAGENET_NORMALIZE
class SwaVTransform(MultiCropTranform):
    """Multi-crop augmentation pipeline for SwaV.

    Input to this transform:
        PIL Image or Tensor.

    Output of this transform:
        List of sum(crop_counts) tensors (8 by default).

    Default augmentations per crop: random resized crop, random horizontal
    flip, color jitter, random grayscale, Gaussian blur, ImageNet
    normalization.

    Attributes:
        crop_sizes: Output size in pixels for each crop category.
        crop_counts: Number of crops generated for each crop category.
        crop_min_scales: Minimum crop scale for each crop category.
        crop_max_scales: Maximum crop scale for each crop category.
        hf_prob: Probability of horizontal flip.
        vf_prob: Probability of vertical flip.
        rr_prob: Probability of random rotation.
        rr_degrees: Rotation range. None rotates by 90 degrees; a single
            number means [-rr_degrees, +rr_degrees]; a (min, max) tuple
            means a random angle in [min, max]. Counter-clockwise.
        cj_prob: Probability of applying color jitter.
        cj_strength: Multiplier for `cj_bright`, `cj_contrast`, `cj_sat`
            and `cj_hue`.
        cj_bright: Brightness jitter magnitude.
        cj_contrast: Contrast jitter magnitude.
        cj_sat: Saturation jitter magnitude.
        cj_hue: Hue jitter magnitude.
        random_gray_scale: Probability of conversion to grayscale.
        gaussian_blur: Probability of Gaussian blur.
        kernel_size: Will be deprecated in favor of `sigmas`; if set, the
            blur sigma is derived from kernel_size * input_size and
            `sigmas` is ignored.
        sigmas: (min, max) range from which the blur sigma is sampled.
            Ignored if `kernel_size` is set.
        normalize: Dict with 'mean' and 'std' for
            torchvision.transforms.Normalize, or None to skip.
    """

    def __init__(
        self,
        crop_sizes: Tuple[int, int] = (224, 96),
        crop_counts: Tuple[int, int] = (2, 6),
        crop_min_scales: Tuple[float, float] = (0.14, 0.05),
        crop_max_scales: Tuple[float, float] = (1.0, 0.14),
        hf_prob: float = 0.5,
        vf_prob: float = 0.0,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        cj_prob: float = 0.8,
        cj_strength: float = 1.0,
        cj_bright: float = 0.8,
        cj_contrast: float = 0.8,
        cj_sat: float = 0.8,
        cj_hue: float = 0.2,
        random_gray_scale: float = 0.2,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.1, 2),
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        # All crop categories share the same per-view augmentation; the
        # multi-crop base class handles the per-category crop geometry.
        view_transform = SwaVViewTransform(
            hf_prob=hf_prob,
            vf_prob=vf_prob,
            rr_prob=rr_prob,
            rr_degrees=rr_degrees,
            cj_prob=cj_prob,
            cj_strength=cj_strength,
            cj_bright=cj_bright,
            cj_contrast=cj_contrast,
            cj_sat=cj_sat,
            cj_hue=cj_hue,
            random_gray_scale=random_gray_scale,
            gaussian_blur=gaussian_blur,
            kernel_size=kernel_size,
            sigmas=sigmas,
            normalize=normalize,
        )
        super().__init__(
            crop_sizes=crop_sizes,
            crop_counts=crop_counts,
            crop_min_scales=crop_min_scales,
            crop_max_scales=crop_max_scales,
            transforms=view_transform,
        )
class SwaVViewTransform:
    """Single-view augmentation pipeline used by the SwaV transform.

    Applies, in order: horizontal/vertical flips, optional random rotation,
    color jitter, random grayscale, Gaussian blur, conversion to tensor,
    and (optionally) normalization. Cropping is handled by the multi-crop
    wrapper, not by this class.
    """

    def __init__(
        self,
        hf_prob: float = 0.5,
        vf_prob: float = 0.0,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        cj_prob: float = 0.8,
        cj_strength: float = 1.0,
        cj_bright: float = 0.8,
        cj_contrast: float = 0.8,
        cj_sat: float = 0.8,
        cj_hue: float = 0.2,
        random_gray_scale: float = 0.2,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.1, 2),
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        # The four jitter magnitudes are scaled by the shared cj_strength.
        color_jitter = T.ColorJitter(
            brightness=cj_strength * cj_bright,
            contrast=cj_strength * cj_contrast,
            saturation=cj_strength * cj_sat,
            hue=cj_strength * cj_hue,
        )
        # Fix: a bare `T.ColorJitter()` used to sit between the rotation and
        # the RandomApply step. With all parameters at their default of 0 it
        # is an identity transform (torchvision maps degenerate ranges to
        # "no jitter"), so it only wasted an RNG draw and confusingly
        # duplicated the real, probabilistic jitter below. It was removed;
        # image outputs are distributionally unchanged (seeded RNG streams
        # may differ since the no-op consumed random numbers).
        transforms = [
            T.RandomHorizontalFlip(p=hf_prob),
            T.RandomVerticalFlip(p=vf_prob),
            random_rotation_transform(rr_prob=rr_prob, rr_degrees=rr_degrees),
            T.RandomApply([color_jitter], p=cj_prob),
            T.RandomGrayscale(p=random_gray_scale),
            GaussianBlur(kernel_size=kernel_size, sigmas=sigmas, prob=gaussian_blur),
            T.ToTensor(),
        ]
        if normalize:
            transforms += [T.Normalize(mean=normalize["mean"], std=normalize["std"])]
        self.transform = T.Compose(transforms)

    def __call__(self, image: Union[Tensor, Image]) -> Tensor:
        """Applies the augmentation pipeline to the input image.

        Args:
            image:
                The input image to apply the transforms to.

        Returns:
            The transformed image as a tensor.
        """
        return self.transform(image)
| 6,338 | 33.82967 | 119 | py |
lightly | lightly-master/lightly/transforms/utils.py | IMAGENET_NORMALIZE = {"mean": [0.485, 0.456, 0.406], "std": [0.229, 0.224, 0.225]}
| 83 | 41 | 82 | py |
lightly | lightly-master/lightly/transforms/vicreg_transform.py | from typing import Optional, Tuple, Union
import torchvision.transforms as T
from PIL.Image import Image
from torch import Tensor
from lightly.transforms.gaussian_blur import GaussianBlur
from lightly.transforms.multi_view_transform import MultiViewTransform
from lightly.transforms.rotation import random_rotation_transform
from lightly.transforms.solarize import RandomSolarization
from lightly.transforms.utils import IMAGENET_NORMALIZE
class VICRegTransform(MultiViewTransform):
    """Augmentation pipeline for VICReg.

    Input to this transform:
        PIL Image or Tensor.

    Output of this transform:
        List of 2 tensors, both produced by the same view transform.

    Default augmentations: random resized crop, random horizontal flip,
    color jitter, random grayscale, random solarization, Gaussian blur,
    ImageNet normalization. Similar to the SimCLR transform but with an
    extra solarization step.

    Attributes:
        input_size: Output image size in pixels.
        cj_prob: Probability of applying color jitter.
        cj_strength: Multiplier for `cj_bright`, `cj_contrast`, `cj_sat`
            and `cj_hue`.
        cj_bright: Brightness jitter magnitude.
        cj_contrast: Contrast jitter magnitude.
        cj_sat: Saturation jitter magnitude.
        cj_hue: Hue jitter magnitude.
        min_scale: Minimum crop scale relative to the full image.
        random_gray_scale: Probability of conversion to grayscale.
        solarize_prob: Probability of solarization.
        gaussian_blur: Probability of Gaussian blur.
        kernel_size: Will be deprecated in favor of `sigmas`; if set, the
            blur sigma is derived from kernel_size * input_size and
            `sigmas` is ignored.
        sigmas: (min, max) range from which the blur sigma is sampled.
            Ignored if `kernel_size` is set.
        vf_prob: Probability of vertical flip.
        hf_prob: Probability of horizontal flip.
        rr_prob: Probability of random rotation.
        rr_degrees: Rotation range. None rotates by 90 degrees; a single
            number means [-rr_degrees, +rr_degrees]; a (min, max) tuple
            means a random angle in [min, max]. Counter-clockwise.
        normalize: Dict with 'mean' and 'std' for
            torchvision.transforms.Normalize, or None to skip.
    """

    def __init__(
        self,
        input_size: int = 224,
        cj_prob: float = 0.8,
        cj_strength: float = 0.5,
        cj_bright: float = 0.8,
        cj_contrast: float = 0.8,
        cj_sat: float = 0.4,
        cj_hue: float = 0.2,
        min_scale: float = 0.08,
        random_gray_scale: float = 0.2,
        solarize_prob: float = 0.1,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.1, 2),
        vf_prob: float = 0.0,
        hf_prob: float = 0.5,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        # Both views share one augmentation pipeline, applied twice.
        shared_view = VICRegViewTransform(
            input_size=input_size,
            cj_prob=cj_prob,
            cj_strength=cj_strength,
            cj_bright=cj_bright,
            cj_contrast=cj_contrast,
            cj_sat=cj_sat,
            cj_hue=cj_hue,
            min_scale=min_scale,
            random_gray_scale=random_gray_scale,
            solarize_prob=solarize_prob,
            gaussian_blur=gaussian_blur,
            kernel_size=kernel_size,
            sigmas=sigmas,
            vf_prob=vf_prob,
            hf_prob=hf_prob,
            rr_prob=rr_prob,
            rr_degrees=rr_degrees,
            normalize=normalize,
        )
        super().__init__(transforms=[shared_view] * 2)
class VICRegViewTransform:
    """Single-view augmentation pipeline used by the VICReg transform.

    Applies, in order: random resized crop, optional random rotation,
    horizontal/vertical flips, color jitter, random grayscale, random
    solarization, Gaussian blur, conversion to tensor, and (optionally)
    normalization.
    """

    def __init__(
        self,
        input_size: int = 224,
        cj_prob: float = 0.8,
        cj_strength: float = 0.5,
        cj_bright: float = 0.8,
        cj_contrast: float = 0.8,
        cj_sat: float = 0.4,
        cj_hue: float = 0.2,
        min_scale: float = 0.08,
        random_gray_scale: float = 0.2,
        solarize_prob: float = 0.1,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        # Fix: default was (0.2, 2), inconsistent with VICRegTransform and
        # every other ViewTransform in this package, which use (0.1, 2).
        # Only affects standalone use; VICRegTransform always passes sigmas.
        sigmas: Tuple[float, float] = (0.1, 2),
        vf_prob: float = 0.0,
        hf_prob: float = 0.5,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        # The four jitter magnitudes are scaled by the shared cj_strength.
        color_jitter = T.ColorJitter(
            brightness=cj_strength * cj_bright,
            contrast=cj_strength * cj_contrast,
            saturation=cj_strength * cj_sat,
            hue=cj_strength * cj_hue,
        )
        # Note: solarization runs before the blur in this pipeline.
        transform = [
            T.RandomResizedCrop(size=input_size, scale=(min_scale, 1.0)),
            random_rotation_transform(rr_prob=rr_prob, rr_degrees=rr_degrees),
            T.RandomHorizontalFlip(p=hf_prob),
            T.RandomVerticalFlip(p=vf_prob),
            T.RandomApply([color_jitter], p=cj_prob),
            T.RandomGrayscale(p=random_gray_scale),
            RandomSolarization(prob=solarize_prob),
            GaussianBlur(kernel_size=kernel_size, sigmas=sigmas, prob=gaussian_blur),
            T.ToTensor(),
        ]
        if normalize:
            transform += [T.Normalize(mean=normalize["mean"], std=normalize["std"])]
        self.transform = T.Compose(transform)

    def __call__(self, image: Union[Tensor, Image]) -> Tensor:
        """Applies the augmentation pipeline to the input image.

        Args:
            image:
                The input image to apply the transforms to.

        Returns:
            The transformed image as a tensor.
        """
        return self.transform(image)
| 6,402 | 34.181319 | 119 | py |
lightly | lightly-master/lightly/transforms/vicregl_transform.py | from typing import Optional, Tuple, Union
import torchvision.transforms as T
from PIL.Image import Image
from torch import Tensor
from lightly.transforms.gaussian_blur import GaussianBlur
from lightly.transforms.image_grid_transform import ImageGridTransform
from lightly.transforms.random_crop_and_flip_with_grid import RandomResizedCropAndFlip
from lightly.transforms.solarize import RandomSolarization
from lightly.transforms.utils import IMAGENET_NORMALIZE
class VICRegLTransform(ImageGridTransform):
    """Transforms images for VICRegL.

    Input to this transform:
        PIL Image or Tensor.

    Output of this transform:
        List of n_global_views + n_local_views tensors (8 by default),
        followed by their location grids.

    Default augmentations per view: random resized crop, random horizontal
    flip, color jitter, random grayscale, Gaussian blur, random
    solarization, ImageNet normalization.

    - [0]: VICRegL, 2022, https://arxiv.org/abs/2210.01571

    Attributes:
        global_crop_size: Output size in pixels for the global crop views.
        local_crop_size: Output size in pixels for the local crop views.
        n_global_views: Number of global crop views to generate.
        n_local_views: Number of local crop views to generate. For ResNet
            backbones it is recommended to set this to 0, see [0].
        global_crop_scale: (min, max) crop scales for the global views.
        local_crop_scale: (min, max) crop scales for the local views.
        global_grid_size: Grid size for the global crop views.
        local_grid_size: Grid size for the local crop views.
        global_gaussian_blur_prob: Gaussian blur probability, global views.
        local_gaussian_blur_prob: Gaussian blur probability, local views.
        global_gaussian_blur_kernel_size: Will be deprecated in favor of
            `global_gaussian_blur_sigmas`; if set, the blur sigma is
            derived from kernel_size * input_size and the sigmas argument
            is ignored. Applied to global views.
        local_gaussian_blur_kernel_size: Same as above, for local views.
        global_gaussian_blur_sigmas: (min, max) range the blur sigma is
            sampled from for global views. Ignored if the corresponding
            kernel size is set.
        local_gaussian_blur_sigmas: Same as above, for local views.
        global_solarize_prob: Solarization probability, global views.
        local_solarize_prob: Solarization probability, local views.
        hf_prob: Probability of horizontal flip.
        vf_prob: Probability of vertical flip.
        cj_prob: Probability of applying color jitter.
        cj_strength: Multiplier for `cj_bright`, `cj_contrast`, `cj_sat`
            and `cj_hue`.
        cj_bright: Brightness jitter magnitude.
        cj_contrast: Contrast jitter magnitude.
        cj_sat: Saturation jitter magnitude.
        cj_hue: Hue jitter magnitude.
        random_gray_scale: Probability of conversion to grayscale.
        normalize: Dict with mean and standard deviation for normalization.
    """

    def __init__(
        self,
        global_crop_size: int = 224,
        local_crop_size: int = 96,
        n_global_views: int = 2,
        n_local_views: int = 6,
        global_crop_scale: Tuple[float, float] = (0.2, 1.0),
        local_crop_scale: Tuple[float, float] = (0.05, 0.2),
        global_grid_size: int = 7,
        local_grid_size: int = 3,
        global_gaussian_blur_prob: float = 0.5,
        local_gaussian_blur_prob: float = 0.1,
        global_gaussian_blur_kernel_size: Optional[float] = None,
        local_gaussian_blur_kernel_size: Optional[float] = None,
        global_gaussian_blur_sigmas: Tuple[float, float] = (0.1, 2),
        local_gaussian_blur_sigmas: Tuple[float, float] = (0.1, 2),
        global_solarize_prob: float = 0.0,
        local_solarize_prob: float = 0.2,
        hf_prob: float = 0.5,
        vf_prob: float = 0.0,
        cj_prob: float = 1.0,
        cj_strength: float = 0.5,
        cj_bright: float = 0.8,
        cj_contrast: float = 0.8,
        cj_sat: float = 0.4,
        cj_hue: float = 0.2,
        random_gray_scale: float = 0.2,
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        global_transform = (
            RandomResizedCropAndFlip(
                crop_size=global_crop_size,
                crop_min_scale=global_crop_scale[0],
                crop_max_scale=global_crop_scale[1],
                hf_prob=hf_prob,
                vf_prob=vf_prob,
                grid_size=global_grid_size,
            ),
            VICRegLViewTransform(
                gaussian_blur_prob=global_gaussian_blur_prob,
                gaussian_blur_kernel_size=global_gaussian_blur_kernel_size,
                gaussian_blur_sigmas=global_gaussian_blur_sigmas,
                solarize_prob=global_solarize_prob,
                cj_prob=cj_prob,
                cj_strength=cj_strength,
                cj_bright=cj_bright,
                cj_contrast=cj_contrast,
                cj_sat=cj_sat,
                cj_hue=cj_hue,
                random_gray_scale=random_gray_scale,
                normalize=normalize,
            ),
        )
        # Fix: vf_prob and the individual jitter magnitudes
        # (cj_bright/cj_contrast/cj_sat/cj_hue) were previously not
        # forwarded to the local views, so user-provided values were
        # silently ignored there while being honored for global views.
        # They are now forwarded so both view types use the same settings.
        local_transform = (
            RandomResizedCropAndFlip(
                crop_size=local_crop_size,
                crop_min_scale=local_crop_scale[0],
                crop_max_scale=local_crop_scale[1],
                hf_prob=hf_prob,
                vf_prob=vf_prob,
                grid_size=local_grid_size,
            ),
            VICRegLViewTransform(
                gaussian_blur_prob=local_gaussian_blur_prob,
                gaussian_blur_kernel_size=local_gaussian_blur_kernel_size,
                gaussian_blur_sigmas=local_gaussian_blur_sigmas,
                solarize_prob=local_solarize_prob,
                cj_prob=cj_prob,
                cj_strength=cj_strength,
                cj_bright=cj_bright,
                cj_contrast=cj_contrast,
                cj_sat=cj_sat,
                cj_hue=cj_hue,
                random_gray_scale=random_gray_scale,
                normalize=normalize,
            ),
        )
        transforms = [global_transform] * n_global_views + [
            local_transform
        ] * n_local_views
        super().__init__(transforms=transforms)
class VICRegLViewTransform:
    """Single-view augmentation pipeline used by the VICRegL transform.

    Applies, in order: color jitter, random grayscale, Gaussian blur,
    random solarization, conversion to tensor, and (optionally)
    normalization. Cropping and flipping are handled separately by
    RandomResizedCropAndFlip, which also produces the location grid.
    """

    def __init__(
        self,
        gaussian_blur_prob: float = 0.5,
        gaussian_blur_kernel_size: Optional[float] = None,
        gaussian_blur_sigmas: Tuple[float, float] = (0.1, 2),
        solarize_prob: float = 0.0,
        cj_prob: float = 1.0,
        cj_strength: float = 0.5,
        cj_bright: float = 0.8,
        cj_contrast: float = 0.8,
        cj_sat: float = 0.4,
        cj_hue: float = 0.2,
        random_gray_scale: float = 0.2,
        normalize: Union[None, dict] = IMAGENET_NORMALIZE,
    ):
        # The four jitter magnitudes are scaled by the shared cj_strength.
        jitter = T.ColorJitter(
            brightness=cj_strength * cj_bright,
            contrast=cj_strength * cj_contrast,
            saturation=cj_strength * cj_sat,
            hue=cj_strength * cj_hue,
        )
        pipeline = [
            T.RandomApply([jitter], p=cj_prob),
            T.RandomGrayscale(p=random_gray_scale),
            GaussianBlur(
                kernel_size=gaussian_blur_kernel_size,
                prob=gaussian_blur_prob,
                sigmas=gaussian_blur_sigmas,
            ),
            RandomSolarization(prob=solarize_prob),
            T.ToTensor(),
        ]
        if normalize:
            pipeline.append(T.Normalize(mean=normalize["mean"], std=normalize["std"]))
        self.transform = T.Compose(transforms=pipeline)

    def __call__(self, image: Union[Tensor, Image]) -> Tensor:
        """Applies the augmentation pipeline to the input image.

        Args:
            image:
                The input image to apply the transforms to.

        Returns:
            The transformed image as a tensor.
        """
        return self.transform(image)
| 8,838 | 37.938326 | 88 | py |
lightly | lightly-master/lightly/utils/__init__.py | 0 | 0 | 0 | py | |
lightly | lightly-master/lightly/utils/bounding_box.py | """ Bounding Box Utils """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
class BoundingBox:
    """Unified representation of a bounding box with normalized coordinates.

    Coordinates are stored as (x0, y0, x1, y1), each relative to the image
    width/height and therefore expected to lie in [0, 1].

    Attributes:
        x0:
            Left edge (normalized to [0, 1]).
        y0:
            Top edge (normalized to [0, 1]).
        x1:
            Right edge (normalized to [0, 1]).
        y1:
            Bottom edge (normalized to [0, 1]).

    Examples:
        >>> # simple case, format (x0, y0, x1, y1)
        >>> bbox = BoundingBox(0.1, 0.2, 0.3, 0.4)
        >>>
        >>> # same bounding box in x, y, w, h format
        >>> bbox = BoundingBox.from_x_y_w_h(0.1, 0.2, 0.2, 0.2)
        >>>
        >>> # coordinates in pixels must first be normalized by image size,
        >>> # e.g. for a 100 x 100 image with box (10, 20, 30, 40):
        >>> W, H = 100, 100
        >>> bbox = BoundingBox(10 / W, 20 / H, 30 / W, 40 / H)
    """

    def __init__(
        self, x0: float, y0: float, x1: float, y1: float, clip_values: bool = True
    ):
        """
        clip_values:
            Set to true to clip the values into [0, 1] instead of raising an error if they lie outside.
        """
        if clip_values:
            # Clamp every coordinate into [0, 1] before validation.
            x0, y0, x1, y1 = (min(1, max(0, coord)) for coord in (x0, y0, x1, y1))
        if any(coord > 1 or coord < 0 for coord in (x0, x1, y0, y1)):
            raise ValueError(
                f"Bounding Box Coordinates must be relative to "
                f"image width and height but are ({x0}, {y0}, {x1}, {y1})."
            )
        if x0 >= x1:
            raise ValueError(
                f"x0 must be smaller than x1 for bounding box "
                f"[{x0}, {y0}, {x1}, {y1}]"
            )
        if y0 >= y1:
            raise ValueError(
                "y0 must be smaller than y1 for bounding box "
                f"[{x0}, {y0}, {x1}, {y1}]"
            )
        self.x0 = x0
        self.y0 = y0
        self.x1 = x1
        self.y1 = y1

    @classmethod
    def from_x_y_w_h(cls, x: float, y: float, w: float, h: float):
        """Helper to convert from bounding box format with width and height.

        Examples:
            >>> bbox = BoundingBox.from_x_y_w_h(0.1, 0.2, 0.2, 0.2)
        """
        return cls(x, y, x + w, y + h)

    @classmethod
    def from_yolo_label(cls, x_center: float, y_center: float, w: float, h: float):
        """Helper to convert from yolo label format
        x_center, y_center, w, h --> x0, y0, x1, y1

        Examples:
            >>> bbox = BoundingBox.from_yolo(0.5, 0.4, 0.2, 0.3)
        """
        half_w = w / 2
        half_h = h / 2
        return cls(
            x_center - half_w,
            y_center - half_h,
            x_center + half_w,
            y_center + half_h,
            clip_values=True,
        )

    @property
    def width(self):
        """Returns the width of the bounding box relative to the image size."""
        return self.x1 - self.x0

    @property
    def height(self):
        """Returns the height of the bounding box relative to the image size."""
        return self.y1 - self.y0

    @property
    def area(self):
        """Returns the area of the bounding box relative to the area of the image."""
        return self.width * self.height
| 3,469 | 28.40678 | 103 | py |
lightly | lightly-master/lightly/utils/debug.py | from typing import List, Union
import torch
import torchvision
from PIL import Image
from lightly.data.collate import BaseCollateFunction, MultiViewCollateFunction
try:
    import matplotlib.pyplot as plt
except ModuleNotFoundError:
    # Deferred-import pattern: store an exception instance in place of the
    # module and raise it only when a plotting function is actually called
    # (see _check_matplotlib_available below).
    plt = ModuleNotFoundError(
        "Matplotlib is not installed on your system. Please install it to use the plotting"
        "functionalities. See https://matplotlib.org/ for installation instructions."
    )
def _check_matplotlib_available() -> None:
    """Raises the deferred import error if matplotlib could not be imported."""
    matplotlib_missing = isinstance(plt, Exception)
    if matplotlib_missing:
        raise plt
@torch.no_grad()
def std_of_l2_normalized(z: torch.Tensor) -> torch.Tensor:
    """Calculates the mean of the standard deviation of z along each dimension.

    This measure was used by [0] to determine the level of collapse of the
    learned representations. A value of 0.0 means the outputs z have collapsed
    to a constant vector. "If the output z has a zero-mean isotropic Gaussian
    distribution" [0], the value should be close to 1/sqrt(d) where d is the
    dimensionality of the output.

    [0]: https://arxiv.org/abs/2011.10566

    Args:
        z:
            A torch tensor of shape batch_size x dimension.

    Returns:
        The mean of the standard deviation of the l2 normalized tensor z along
        each dimension.
    """
    if len(z.shape) != 2:
        raise ValueError(
            f"Input tensor must have two dimensions but has {len(z.shape)}!"
        )
    normalized = torch.nn.functional.normalize(z, dim=1)
    per_dimension_std = normalized.std(dim=0)
    return per_dimension_std.mean()
def apply_transform_without_normalize(
    image: Image.Image,
    transform,
):
    """Applies the transform to the image but skips ToTensor and Normalize."""
    skippable_transforms = (
        torchvision.transforms.ToTensor,
        torchvision.transforms.Normalize,
    )
    if isinstance(transform, torchvision.transforms.Compose):
        # Recurse into composed transforms so nested skippable steps are
        # filtered out as well.
        for sub_transform in transform.transforms:
            image = apply_transform_without_normalize(image, sub_transform)
        return image
    if isinstance(transform, skippable_transforms):
        return image
    return transform(image)
def generate_grid_of_augmented_images(
    input_images: List[Image.Image],
    collate_function: Union[BaseCollateFunction, MultiViewCollateFunction],
):
    """Returns a grid of augmented images. Images in a column belong together.

    This function ignores the transforms ToTensor and Normalize for
    visualization purposes.

    Args:
        input_images:
            List of PIL images for which the augmentations should be plotted.
        collate_function:
            The collate function of the self-supervised learning algorithm.
            Must be of type BaseCollateFunction or MultiViewCollateFunction.

    Returns:
        A grid of augmented images. Images in a column belong together.
    """
    if isinstance(collate_function, BaseCollateFunction):
        # The base collate function applies the same transform twice,
        # producing two example views per image.
        view_transforms = [collate_function.transform] * 2
    elif isinstance(collate_function, MultiViewCollateFunction):
        view_transforms = list(collate_function.transforms)
    else:
        raise ValueError(
            "Collate function must be one of "
            "(BaseCollateFunction, MultiViewCollateFunction) "
            f"but is {type(collate_function)}."
        )
    return [
        [apply_transform_without_normalize(image, transform) for image in input_images]
        for transform in view_transforms
    ]
def plot_augmented_images(
    input_images: List[Image.Image],
    collate_function: Union[BaseCollateFunction, MultiViewCollateFunction],
):
    """Returns a figure showing original images in the left column and augmented images to their right.

    This function ignores the transforms ToTensor and Normalize for visualization purposes.

    Args:
        input_images:
            List of PIL images for which the augmentations should be plotted.
        collate_function:
            The collate function of the self-supervised learning algorithm.
            Must be of type BaseCollateFunction or MultiViewCollateFunction.

    Returns:
        A figure showing the original images in the left column and the augmented
        images to their right. If the collate_function is an instance of the
        BaseCollateFunction, two example augmentations are shown. For
        MultiViewCollateFunctions all the generated views are shown.

    Raises:
        ValueError: If input_images is empty.
    """
    _check_matplotlib_available()
    if len(input_images) == 0:
        raise ValueError("There must be at least one input image.")
    grid = generate_grid_of_augmented_images(input_images, collate_function)
    # Prepend the originals so they appear as the leftmost column.
    grid.insert(0, input_images)
    nrows = len(input_images)
    ncols = len(grid)
    fig, axs = plt.subplots(nrows, ncols, figsize=(ncols * 1.5, nrows * 1.5))
    for i in range(nrows):
        for j in range(ncols):
            # plt.subplots returns a 1-D axes array when nrows == 1, so the
            # indexing must depend on the number of input images.
            ax = axs[i][j] if len(input_images) > 1 else axs[j]
            # grid is indexed [column][row]: each grid entry is one column.
            img = grid[j][i]
            ax.imshow(img)
            ax.set_axis_off()
    ax_top_left = axs[0, 0] if len(input_images) > 1 else axs[0]
    ax_top_left.set(title="Original images")
    ax_top_left.title.set_size(8)
    ax_top_next = axs[0, 1] if len(input_images) > 1 else axs[1]
    ax_top_next.set(title="Augmented images")
    ax_top_next.title.set_size(8)
    fig.tight_layout()
    return fig
| 5,552 | 32.251497 | 103 | py |
lightly | lightly-master/lightly/utils/dist.py | from typing import Optional, Tuple
import torch
import torch.distributed as dist
class GatherLayer(torch.autograd.Function):
    """Gather tensors from all processes, supporting backward propagation.

    This code was taken and adapted from here:
    https://github.com/Spijkervet/SimCLR

    """

    @staticmethod
    def forward(ctx, input: torch.Tensor) -> Tuple[torch.Tensor, ...]:
        # Collect the input tensor from every process; the result contains one
        # tensor per rank (the entry at this process' rank equals the local
        # input).
        ctx.save_for_backward(input)
        output = [torch.empty_like(input) for _ in range(dist.get_world_size())]
        dist.all_gather(output, input)
        return tuple(output)

    @staticmethod
    def backward(ctx, *grads: torch.Tensor) -> torch.Tensor:
        # Route only the gradient slot belonging to this process' rank back to
        # the local input.
        # NOTE(review): gradients computed on other ranks are discarded rather
        # than reduced across processes — confirm this matches the distributed
        # loss' expectations.
        (input,) = ctx.saved_tensors
        grad_out = torch.empty_like(input)
        grad_out[:] = grads[dist.get_rank()]
        return grad_out
def rank() -> int:
    """Returns the rank of the current process (0 when not distributed)."""
    if dist.is_initialized():
        return dist.get_rank()
    return 0
def world_size() -> int:
    """Returns the number of distributed processes (1 when not distributed)."""
    if dist.is_initialized():
        return dist.get_world_size()
    return 1
def gather(input: torch.Tensor) -> Tuple[torch.Tensor]:
    """Gathers this tensor from all processes. Supports backprop."""
    # Thin wrapper around the autograd-aware GatherLayer; returns one tensor
    # per process rank.
    return GatherLayer.apply(input)
def eye_rank(n: int, device: Optional[torch.device] = None) -> torch.Tensor:
    """Returns an (n, n * world_size) zero matrix with the diagonal for the rank
    of this process set to 1.

    Example output where n=3, the current process has rank 1, and there are
    4 processes in total:

        rank0   rank1   rank2   rank3
        0 0 0 | 1 0 0 | 0 0 0 | 0 0 0
        0 0 0 | 0 1 0 | 0 0 0 | 0 0 0
        0 0 0 | 0 0 1 | 0 0 0 | 0 0 0

    Equivalent to torch.eye for undistributed settings or if world size == 1.

    Args:
        n:
            Size of the square matrix on a single process.
        device:
            Device on which the matrix should be created.

    """
    # Diagonal entries for this rank: row i maps to column i + rank * n.
    row_indices = torch.arange(n, device=device, dtype=torch.long)
    col_indices = row_indices + rank() * n
    # NOTE(review): the mask itself is allocated on the default device, not on
    # `device` — confirm callers only pass CPU tensors/indices here.
    mask = torch.zeros((n, n * world_size()), dtype=torch.bool)
    mask[(row_indices, col_indices)] = True
    return mask
| 2,171 | 29.591549 | 80 | py |
lightly | lightly-master/lightly/utils/embeddings_2d.py | """ Transform embeddings to two-dimensional space for visualization. """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import numpy as np
class PCA(object):
    """Handmade PCA to bypass sklearn dependency.

    Attributes:
        n_components:
            Number of principal components to keep.
        eps:
            Epsilon for numerical stability.
    """

    def __init__(self, n_components: int = 2, eps: float = 1e-10):
        self.n_components = n_components
        self.mean = None  # feature-wise mean, set by fit()
        self.w = None  # eigenvector matrix, columns sorted by eigenvalue (descending)
        self.eps = eps

    def fit(self, X: np.ndarray) -> "PCA":
        """Fits PCA to data in X.

        Args:
            X:
                Datapoints stored in numpy array of size n x d.

        Returns:
            PCA object to transform datapoints.
        """
        X = X.astype(np.float32)
        self.mean = X.mean(axis=0)
        X = X - self.mean + self.eps
        cov = np.cov(X.T) / X.shape[0]
        # Bug fix: use eigh instead of eig. The covariance matrix is
        # symmetric, and eig can return complex eigenvectors due to floating
        # point noise, which made transform() return a complex array. eigh
        # guarantees real eigenvalues and eigenvectors.
        v, w = np.linalg.eigh(cov)
        # Sort components by decreasing eigenvalue (explained variance).
        idx = v.argsort()[::-1]
        v, w = v[idx], w[:, idx]
        self.w = w
        return self

    def transform(self, X: np.ndarray) -> np.ndarray:
        """Uses PCA to transform data in X.

        Args:
            X:
                Datapoints stored in numpy array of size n x d.

        Returns:
            Numpy array of n x p datapoints where p <= d.

        Raises:
            ValueError: If fit() has not been called before.
        """
        if self.mean is None or self.w is None:
            raise ValueError("PCA not fitted yet. Call fit() first.")
        X = X.astype(np.float32)
        X = X - self.mean + self.eps
        return X.dot(self.w)[:, : self.n_components]
def fit_pca(embeddings: np.ndarray, n_components: int = 2, fraction: float = None):
    """Fits PCA to randomly selected subset of embeddings.

    For large datasets, it can be unfeasible to perform PCA on the whole data.
    This method can fit a PCA on a fraction of the embeddings in order to save
    computational resources.

    Args:
        embeddings:
            Datapoints stored in numpy array of size n x d.
        n_components:
            Number of principal components to keep.
        fraction:
            Fraction of the dataset to fit PCA on.

    Returns:
        A transformer which can be used to transform embeddings
        to lower dimensions.

    Raises:
        ValueError: If fraction < 0 or fraction > 1.
    """
    if fraction is not None and not 0.0 <= fraction <= 1.0:
        msg = f"fraction must be in [0, 1] but was {fraction}."
        raise ValueError(msg)

    num_embeddings = embeddings.shape[0]
    if fraction is None:
        num_samples = num_embeddings
    else:
        num_samples = min(num_embeddings, int(num_embeddings * fraction))
    # Shuffle before taking the subset so the sample is random.
    shuffled = embeddings[np.random.permutation(num_embeddings)]
    subset = shuffled[:num_samples]
    return PCA(n_components=n_components).fit(subset)
| 2,604 | 26.712766 | 83 | py |
lightly | lightly-master/lightly/utils/hipify.py | import copy
import warnings
from typing import Type
class bcolors:
    # ANSI escape sequences for colored/styled terminal output.
    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    ENDC = "\033[0m"  # resets all colors/attributes
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
def _custom_formatwarning(msg, *args, **kwargs):
    """Formats a warning as just the colored message, dropping file/line info.

    Signature must be compatible with warnings.formatwarning(message,
    category, filename, lineno, line=None); extra arguments are ignored on
    purpose.
    """
    # Bug fix: the message previously ended with bcolors.WARNING, which left
    # the terminal color enabled for all following output; ENDC resets it.
    return f"{bcolors.WARNING}{msg}{bcolors.ENDC}\n"
def print_as_warning(message: str, warning_class: Type[Warning] = UserWarning):
    """Emits `message` as a colored warning of type `warning_class`.

    Temporarily swaps warnings.formatwarning for a formatter that prints only
    the colored message, and restores the original formatter afterwards.

    Args:
        message:
            The warning message to print.
        warning_class:
            The warning category to emit (default: UserWarning).
    """
    old_format = copy.copy(warnings.formatwarning)
    warnings.formatwarning = _custom_formatwarning
    try:
        warnings.warn(message, warning_class)
    finally:
        # Bug fix: restore the formatter even if warnings.warn raises, e.g.
        # when a warnings filter escalates the warning to an error.
        warnings.formatwarning = old_format
| 675 | 22.310345 | 79 | py |
lightly | lightly-master/lightly/utils/io.py | """ I/O operations to save and load embeddings. """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import csv
import json
import re
from itertools import compress
from typing import Dict, List, Tuple
import numpy as np
INVALID_FILENAME_CHARACTERS = [","]
def _is_valid_filename(filename: str) -> bool:
    """Returns False if the filename contains a forbidden character."""
    return not any(
        character in filename for character in INVALID_FILENAME_CHARACTERS
    )
def check_filenames(filenames: List[str]):
    """Raises an error if one of the filenames is misformatted

    Args:
        filenames:
            A list of string being filenames

    Raises:
        ValueError: If any filename contains a forbidden character.
    """
    invalid_filenames = [
        filename for filename in filenames if not _is_valid_filename(filename)
    ]
    if invalid_filenames:
        raise ValueError(f"Invalid filename(s): {invalid_filenames}")
def check_embeddings(path: str, remove_additional_columns: bool = False):
    """Raises an error if the embeddings csv file has not the correct format

    Use this check whenever you want to upload an embedding to the Lightly
    Platform.
    This method only checks whether the header row matches the specs:
    https://docs.lightly.ai/self-supervised-learning/getting_started/command_line_tool.html#id1

    Args:
        path:
            Path to the embedding csv file
        remove_additional_columns:
            If True, all additional columns
            which are not in {filenames, embeddings_x, labels} are removed.
            If false, they are kept unchanged.

    Raises:
        RuntimeError
    """
    # First pass: validate the header and body without modifying the file.
    with open(path, "r", newline="") as csv_file:
        reader = csv.reader(csv_file, delimiter=",")
        header: List[str] = next(reader)
        # check for whitespace in the header (we don't allow this)
        if any(x != x.strip() for x in header):
            raise RuntimeError("Embeddings csv file must not contain whitespaces.")
        # first col is `filenames`
        if header[0] != "filenames":
            raise RuntimeError(
                f"Embeddings csv file must start with `filenames` "
                f"column but had {header[0]} instead."
            )
        # `labels` exists
        try:
            header_labels_idx = header.index("labels")
        except ValueError:
            raise RuntimeError(f"Embeddings csv file has no `labels` column.")
        # cols between first and `labels` are `embedding_x`
        for embedding_header in header[1:header_labels_idx]:
            if not re.match(r"embedding_\d+", embedding_header):
                # check if we have a special column
                # ("masked" and "selected" are tolerated as-is)
                if not embedding_header in ["masked", "selected"]:
                    raise RuntimeError(
                        f"Embeddings csv file must have `embedding_x` columns but "
                        f"found {embedding_header} instead."
                    )
        # check for empty rows in the body of the csv file
        for i, row in enumerate(reader):
            if len(row) == 0:
                raise RuntimeError(
                    f"Embeddings csv file must not have empty rows. "
                    f"Found empty row on line {i}."
                )
    # Second pass (optional): rewrite the file in place keeping only the
    # filenames, embedding_x and labels columns.
    if remove_additional_columns:
        new_rows = []
        with open(path, "r", newline="") as csv_file:
            reader = csv.reader(csv_file, delimiter=",")
            header_row = next(reader)
            # create mask of columns to keep only filenames, embedding_ or labels
            regexp = r"filenames|(embedding_\d+)|labels"
            col_mask = []
            for i, col in enumerate(header_row):
                col_mask += [True] if re.match(regexp, col) else [False]
            # add header row manually here since we use an iterator
            new_rows.append(list(compress(header_row, col_mask)))
            for row in reader:
                # apply mask to only use filenames, embedding_ or labels
                new_rows.append(list(compress(row, col_mask)))
        with open(path, "w", newline="") as csv_file:
            writer = csv.writer(csv_file, delimiter=",")
            writer.writerows(new_rows)
def save_embeddings(
    path: str, embeddings: np.ndarray, labels: List[int], filenames: List[str]
):
    """Saves embeddings in a csv file in a Lightly compatible format.

    Creates a csv file at the location specified by path and saves embeddings,
    labels, and filenames.

    Args:
        path:
            Path to the csv file.
        embeddings:
            Embeddings of the images as a numpy array (n x d).
        labels:
            List of integer labels.
        filenames:
            List of filenames.

    Raises:
        ValueError: If embeddings, labels, and filenames have different lengths.

    Examples:
        >>> import lightly.utils.io as io
        >>> io.save_embeddings(
        >>>     'path/to/my/embeddings.csv',
        >>>     embeddings,
        >>>     labels,
        >>>     filenames)
    """
    check_filenames(filenames)

    n_embeddings = len(embeddings)
    n_filenames = len(filenames)
    n_labels = len(labels)
    if n_embeddings != n_labels or n_filenames != n_labels:
        msg = "Length of embeddings, labels, and filenames should be equal "
        msg += f" but are not: ({n_embeddings}, {n_filenames}, {n_labels})"
        raise ValueError(msg)

    # Header: filenames, embedding_0 ... embedding_{d-1}, labels.
    header = (
        ["filenames"]
        + [f"embedding_{i}" for i in range(embeddings.shape[-1])]
        + ["labels"]
    )
    with open(path, "w", newline="") as csv_file:
        writer = csv.writer(csv_file, delimiter=",")
        writer.writerow(header)
        writer.writerows(
            [filename, *embedding, str(label)]
            for filename, embedding, label in zip(filenames, embeddings, labels)
        )
def load_embeddings(path: str):
    """Loads embeddings from a csv file in a Lightly compatible format.

    Args:
        path:
            Path to the csv file.

    Returns:
        The embeddings as a numpy array, labels as a list of integers, and
        filenames as a list of strings in the order they were saved.

        The embeddings will always be of the Float32 datatype.

    Examples:
        >>> import lightly.utils.io as io
        >>> embeddings, labels, filenames = io.load_embeddings(
        >>>     'path/to/my/embeddings.csv')
    """
    check_embeddings(path)

    filenames, labels = [], []
    embeddings = []
    with open(path, "r", newline="") as csv_file:
        reader = csv.reader(csv_file, delimiter=",")
        header_skipped = False
        for row in reader:
            if not header_skipped:
                # The first row only contains the column names.
                header_skipped = True
                continue
            # Row layout: filename, embedding values..., label.
            filenames.append(row[0])
            labels.append(int(row[-1]))
            embeddings.append(row[1:-1])

    check_filenames(filenames)

    embeddings = np.array(embeddings).astype(np.float32)
    return embeddings, labels, filenames
def load_embeddings_as_dict(
    path: str, embedding_name: str = "default", return_all: bool = False
):
    """Loads embeddings from csv and store it in a dictionary for transfer.

    Loads embeddings to a dictionary which can be serialized and sent to the
    Lightly servers. It is recommended that the embedding_name is always
    specified because the Lightly web-app does not allow two embeddings with
    the same name.

    Args:
        path:
            Path to the csv file.
        embedding_name:
            Name of the embedding for the platform.
        return_all:
            If true, return embeddings, labels, and filenames, too.

    Returns:
        A dictionary containing the embedding information (see load_embeddings)

    Examples:
        >>> import lightly.utils.io as io
        >>> embedding_dict = io.load_embeddings_as_dict(
        >>>     'path/to/my/embeddings.csv',
        >>>     embedding_name='MyEmbeddings')
        >>>
        >>> result = io.load_embeddings_as_dict(
        >>>     'path/to/my/embeddings.csv',
        >>>     embedding_name='MyEmbeddings',
        >>>     return_all=True)
        >>> embedding_dict, embeddings, labels, filenames = result
    """
    embeddings, labels, filenames = load_embeddings(path)

    # Build the serializable payload expected by the platform.
    items = [
        {"fileName": filename, "value": embedding.tolist(), "label": label}
        for embedding, filename, label in zip(embeddings, filenames, labels)
    ]
    data = {"embeddingName": embedding_name, "embeddings": items}

    if return_all:
        return data, embeddings, labels, filenames
    return data
class COCO_ANNOTATION_KEYS:
    """Enum of coco annotation keys complemented with a key for custom metadata."""

    # image keys (top-level "images" list and its entry fields)
    images: str = "images"
    images_id: str = "id"
    images_filename: str = "file_name"
    # metadata keys (top-level "metadata" list and the image reference field)
    custom_metadata: str = "metadata"
    custom_metadata_image_id: str = "image_id"
def format_custom_metadata(custom_metadata: List[Tuple[str, Dict]]):
    """Transforms custom metadata into a format which can be handled by Lightly.

    Args:
        custom_metadata:
            List of tuples (filename, metadata) where metadata is a dictionary.

    Returns:
        A dictionary of formatted custom metadata.

    Examples:
        >>> custom_metadata = [
        >>>     ('hello.png', {'number_of_people': 1}),
        >>>     ('world.png', {'number_of_people': 3}),
        >>> ]
        >>>
        >>> format_custom_metadata(custom_metadata)
        >>> > {
        >>> >   'images': [{'id': 0, 'file_name': 'hello.png'}, {'id': 1, 'file_name': 'world.png'}],
        >>> >   'metadata': [{'image_id': 0, 'number_of_people': 1}, {'image_id': 1, 'number_of_people': 3}]
        >>> > }

    """
    formatted = {
        COCO_ANNOTATION_KEYS.images: [],
        COCO_ANNOTATION_KEYS.custom_metadata: [],
    }
    # Enumerate to assign each image a stable id linking the two lists.
    for image_id, (filename, metadata) in enumerate(custom_metadata):
        image_entry = {
            COCO_ANNOTATION_KEYS.images_id: image_id,
            COCO_ANNOTATION_KEYS.images_filename: filename,
        }
        metadata_entry = {
            COCO_ANNOTATION_KEYS.custom_metadata_image_id: image_id,
            **metadata,
        }
        formatted[COCO_ANNOTATION_KEYS.images].append(image_entry)
        formatted[COCO_ANNOTATION_KEYS.custom_metadata].append(metadata_entry)
    return formatted
def save_custom_metadata(path: str, custom_metadata: List[Tuple[str, Dict]]):
    """Saves custom metadata in a .json.

    Args:
        path:
            Filename of the .json file where the data should be stored.
        custom_metadata:
            List of tuples (filename, metadata) where metadata is a dictionary.
    """
    coco_style = format_custom_metadata(custom_metadata)
    with open(path, "w") as json_file:
        json.dump(coco_style, json_file)
def save_tasks(
    path: str,
    tasks: List[str],
):
    """Saves a list of prediction task names in the right format.

    Args:
        path:
            Where to store the task names.
        tasks:
            List of task names.
    """
    with open(path, "w") as tasks_file:
        json.dump(tasks, tasks_file)
def save_schema(path: str, task_type: str, ids: List[int], names: List[str]):
    """Saves a prediction schema in the right format.

    Args:
        path:
            Where to store the schema.
        task_type:
            Task type (e.g. classification, object-detection).
        ids:
            List of category ids.
        names:
            List of category names.

    Raises:
        ValueError: If ids and names do not have the same length.
    """
    if len(ids) != len(names):
        raise ValueError("ids and names must have same length!")
    categories = [
        {"id": category_id, "name": category_name}
        for category_id, category_name in zip(ids, names)
    ]
    schema = {"task_type": task_type, "categories": categories}
    with open(path, "w") as schema_file:
        json.dump(schema, schema_file)
| 11,880 | 30.938172 | 108 | py |
lightly | lightly-master/lightly/utils/lars.py | import torch
from torch.optim.optimizer import Optimizer, required
class LARS(Optimizer):
    r"""Extends SGD in PyTorch with LARS scaling from the paper "Large batch training of
    Convolutional Networks" [0].

    Implementation from PyTorch Lightning Bolts [1].

    - [0]: https://arxiv.org/pdf/1708.03888.pdf
    - [1]: https://github.com/Lightning-Universe/lightning-bolts/blob/2dfe45a4cf050f120d10981c45cfa2c785a1d5e6/pl_bolts/optimizers/lars.py#L1

    Args:
        params:
            Iterable of parameters to optimize or dicts defining parameter groups.
        lr:
            Learning rate
        momentum:
            Momentum factor.
        weight_decay:
            Weight decay (L2 penalty).
        dampening:
            Dampening for momentum.
        nesterov:
            Enables Nesterov momentum.
        trust_coefficient:
            Trust coefficient for computing learning rate.
        eps:
            Eps for division denominator.

    Example:
        >>> model = torch.nn.Linear(10, 1)
        >>> input = torch.Tensor(10)
        >>> target = torch.Tensor([1.])
        >>> loss_fn = lambda input, target: (input - target) ** 2
        >>> #
        >>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    .. note::
        The application of momentum in the SGD part is modified according to
        the PyTorch standards. LARS scaling fits into the equation in the
        following fashion.

        .. math::
            \begin{aligned}
                g_{t+1} & = \text{lars\_lr} * (\beta * p_{t} + g_{t+1}), \\
                v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
                p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
            \end{aligned}

        where :math:`p`, :math:`g`, :math:`v`, :math:`\mu` and :math:`\beta` denote the
        parameters, gradient, velocity, momentum, and weight decay respectively.
        The :math:`\text{lars\_lr}` is defined by Eq. 6 in the paper.
        The Nesterov version is analogously modified.

        (Docstring is now a raw string: the previous non-raw version silently
        turned ``\b`` and ``\t`` in the LaTeX into backspace/tab characters.)

    .. warning::
        Parameters with weight decay set to 0 will automatically be excluded from
        layer-wise LR scaling. This is to ensure consistency with papers like SimCLR
        and BYOL.
    """

    def __init__(
        self,
        params,
        lr: float = required,
        momentum: float = 0,
        dampening: float = 0,
        weight_decay: float = 0,
        nesterov: bool = False,
        trust_coefficient: float = 0.001,
        eps: float = 1e-8,
    ):
        if lr is not required and lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr}")
        if momentum < 0.0:
            raise ValueError(f"Invalid momentum value: {momentum}")
        if weight_decay < 0.0:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        defaults = dict(
            lr=lr,
            momentum=momentum,
            dampening=dampening,
            weight_decay=weight_decay,
            nesterov=nesterov,
            trust_coefficient=trust_coefficient,
            eps=eps,
        )
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super().__init__(params, defaults)

    def __setstate__(self, state):
        # Backwards compatibility: older checkpoints may lack "nesterov".
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("nesterov", False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        # exclude scaling for params with 0 weight decay
        for group in self.param_groups:
            weight_decay = group["weight_decay"]
            momentum = group["momentum"]
            dampening = group["dampening"]
            nesterov = group["nesterov"]

            for p in group["params"]:
                if p.grad is None:
                    continue

                d_p = p.grad
                p_norm = torch.norm(p.data)
                g_norm = torch.norm(p.grad.data)

                # lars scaling + weight decay part
                # (only applied when weight_decay != 0, see class warning)
                if weight_decay != 0:
                    if p_norm != 0 and g_norm != 0:
                        # Eq. 6 of the LARS paper: layer-wise trust ratio.
                        lars_lr = p_norm / (
                            g_norm + p_norm * weight_decay + group["eps"]
                        )
                        lars_lr *= group["trust_coefficient"]

                        d_p = d_p.add(p, alpha=weight_decay)
                        d_p *= lars_lr

                # sgd part
                if momentum != 0:
                    param_state = self.state[p]
                    if "momentum_buffer" not in param_state:
                        buf = param_state["momentum_buffer"] = torch.clone(d_p).detach()
                    else:
                        buf = param_state["momentum_buffer"]
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf

                p.add_(d_p, alpha=-group["lr"])

        return loss
| 5,481 | 33.477987 | 141 | py |
lightly | lightly-master/lightly/utils/reordering.py | from typing import List, Sized
def sort_items_by_keys(
    keys: List[Any], items: List[Any], sorted_keys: List[Any]
) -> List[Any]:
    """Sorts the items in the same order as the sorted keys.

    Note: the annotations previously used the builtin function ``any`` instead
    of ``typing.Any``; this is fixed here.

    Args:
        keys:
            Keys by which items can be identified.
        items:
            Items to sort.
        sorted_keys:
            Keys in sorted order.

    Returns:
        The list of sorted items.

    Raises:
        ValueError: If keys, items, and sorted_keys have different lengths.

    Examples:
        >>> keys = [3, 2, 1]
        >>> items = ['!', 'world', 'hello']
        >>> sorted_keys = [1, 2, 3]
        >>> sorted_items = sort_items_by_keys(
        >>>     keys,
        >>>     items,
        >>>     sorted_keys,
        >>> )
        >>> print(sorted_items)
        >>> > ['hello', 'world', '!']

    """
    if len(keys) != len(items) or len(keys) != len(sorted_keys):
        raise ValueError(
            f"All inputs (keys, items and sorted_keys) "
            f"must have the same length, "
            f"but their lengths are: ({len(keys)},"
            f"{len(items)} and {len(sorted_keys)})."
        )

    # Map each key to its item, then read the items back in sorted-key order.
    lookup = {key_: item_ for key_, item_ in zip(keys, items)}
    sorted_ = [lookup[key_] for key_ in sorted_keys]
    return sorted_
| 1,178 | 27.756098 | 82 | py |
lightly | lightly-master/lightly/utils/scheduler.py | import warnings
import numpy as np
import torch
def cosine_schedule(
    step: float, max_steps: float, start_value: float, end_value: float
) -> float:
    """
    Use cosine decay to gradually modify start_value to reach target end_value during iterations.

    Args:
        step:
            Current step number.
        max_steps:
            Total number of steps.
        start_value:
            Starting value.
        end_value:
            Target value.

    Returns:
        Cosine decay value.

    Raises:
        ValueError: If step is negative or max_steps is smaller than 1.
    """
    if step < 0:
        raise ValueError("Current step number can't be negative")
    if max_steps < 1:
        raise ValueError("Total step number must be >= 1")
    if step > max_steps:
        warnings.warn(
            f"Current step number {step} exceeds max_steps {max_steps}.",
            category=RuntimeWarning,
        )

    if max_steps == 1 or step == max_steps:
        # max_steps == 1 would divide by zero below; step == max_steps covers
        # PyTorch Lightning updating the scheduler once more after the last
        # training epoch. Both cases return the target value directly.
        return end_value

    # Interpolation weight goes from 1 (step 0) to 0 (last step).
    progress = (np.cos(np.pi * step / (max_steps - 1)) + 1) / 2
    return end_value - (end_value - start_value) * progress
class CosineWarmupScheduler(torch.optim.lr_scheduler.LambdaLR):
    """
    Cosine warmup scheduler for learning rate.

    Scales the learning rate linearly during warmup and then follows a cosine
    decay schedule (see cosine_schedule) for the remaining epochs/steps.

    Args:
        optimizer:
            Optimizer object to schedule the learning rate.
        warmup_epochs:
            Number of warmup epochs or steps.
        max_epochs:
            Total number of training epochs or steps.
        last_epoch:
            The index of last epoch or step. Default: -1
        start_value:
            Starting learning rate scale. Default: 1.0
        end_value:
            Target learning rate scale. Default: 0.001
        verbose:
            If True, prints a message to stdout for each update. Default: False.
    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        warmup_epochs: float,
        max_epochs: float,
        last_epoch: float = -1,
        start_value: float = 1.0,
        end_value: float = 0.001,
        verbose: bool = False,
    ) -> None:
        self.warmup_epochs = warmup_epochs
        self.max_epochs = max_epochs
        self.start_value = start_value
        self.end_value = end_value
        super().__init__(
            optimizer=optimizer,
            lr_lambda=self.scale_lr,
            last_epoch=last_epoch,
            verbose=verbose,
        )

    def scale_lr(self, epoch: int) -> float:
        """
        Scale learning rate according to the current epoch number.

        Args:
            epoch:
                Current epoch number.

        Returns:
            Scaled learning rate.
        """
        if epoch < self.warmup_epochs:
            # Linear warmup from 1/warmup_epochs up to 1.
            return (epoch + 1) / self.warmup_epochs
        # After warmup: cosine decay over the remaining epochs.
        return cosine_schedule(
            step=epoch - self.warmup_epochs,
            max_steps=self.max_epochs - self.warmup_epochs,
            start_value=self.start_value,
            end_value=self.end_value,
        )
| 3,212 | 26.461538 | 97 | py |
lightly | lightly-master/lightly/utils/version_compare.py | """ Utility method for comparing versions of libraries """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
def version_compare(v0: str, v1: str) -> int:
    """Compares two version strings of the form `x.y.z`.

    Use this method to compare Python package versions and see which one is
    newer.

    Args:
        v0:
            First version string.
        v1:
            Second version string.

    Returns:
        1 if v0 is a higher version than v1, -1 if it is lower, and 0 if both
        versions are equal. (The previous docstring incorrectly claimed -1 for
        every non-greater case.)

    Raises:
        ValueError: If either version string does not have exactly three
            numeric components.

    Examples:
        >>> # compare two versions
        >>> version_compare('1.2.0', '1.1.2')
        >>> 1
    """
    parts0 = [int(part) for part in v0.split(".")]
    parts1 = [int(part) for part in v1.split(".")]
    if len(parts0) != 3 or len(parts1) != 3:
        raise ValueError(
            f"Length of version strings is not 3 (expected pattern `x.y.z`) but is "
            f"{parts0} and {parts1}."
        )
    # List comparison is lexicographic: major first, then minor, then patch.
    if parts0 > parts1:
        return 1
    if parts0 < parts1:
        return -1
    return 0
| 885 | 25.848485 | 84 | py |
lightly | lightly-master/lightly/utils/benchmarking/__init__.py | from lightly.utils.benchmarking.benchmark_module import BenchmarkModule
from lightly.utils.benchmarking.knn import knn_predict
from lightly.utils.benchmarking.knn_classifier import KNNClassifier
from lightly.utils.benchmarking.linear_classifier import LinearClassifier
from lightly.utils.benchmarking.metric_callback import MetricCallback
from lightly.utils.benchmarking.online_linear_classifier import OnlineLinearClassifier
| 426 | 60 | 86 | py |
lightly | lightly-master/lightly/utils/benchmarking/benchmark_module.py | # Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from typing import List, Optional
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning import LightningModule
from torch import Tensor
from torch.utils.data import DataLoader
from lightly.utils.benchmarking.knn import knn_predict
class BenchmarkModule(LightningModule):
"""A PyTorch Lightning Module for automated kNN callback
At the end of every training epoch we create a feature bank by feeding the
`dataloader_kNN` passed to the module through the backbone.
At every validation step we predict features on the validation data.
After all predictions on validation data (validation_epoch_end) we evaluate
the predictions on a kNN classifier on the validation data using the
feature_bank features from the train data.
We can access the highest test accuracy during a kNN prediction
using the `max_accuracy` attribute.
Attributes:
backbone:
The backbone model used for kNN validation. Make sure that you set the
backbone when inheriting from `BenchmarkModule`.
max_accuracy:
Floating point number between 0.0 and 1.0 representing the maximum
test accuracy the benchmarked model has achieved.
dataloader_kNN:
Dataloader to be used after each training epoch to create feature bank.
num_classes:
Number of classes. E.g. for cifar10 we have 10 classes. (default: 10)
knn_k:
Number of nearest neighbors for kNN
knn_t:
Temperature parameter for kNN
Examples:
>>> class SimSiamModel(BenchmarkingModule):
>>> def __init__(dataloader_kNN, num_classes):
>>> super().__init__(dataloader_kNN, num_classes)
>>> resnet = lightly.models.ResNetGenerator('resnet-18')
>>> self.backbone = nn.Sequential(
>>> *list(resnet.children())[:-1],
>>> nn.AdaptiveAvgPool2d(1),
>>> )
>>> self.resnet_simsiam =
>>> lightly.models.SimSiam(self.backbone, num_ftrs=512)
>>> self.criterion = lightly.loss.SymNegCosineSimilarityLoss()
>>>
>>> def forward(self, x):
>>> self.resnet_simsiam(x)
>>>
>>> def training_step(self, batch, batch_idx):
>>> (x0, x1), _, _ = batch
>>> x0, x1 = self.resnet_simsiam(x0, x1)
>>> loss = self.criterion(x0, x1)
>>> return loss
>>> def configure_optimizers(self):
>>> optim = torch.optim.SGD(
>>> self.resnet_simsiam.parameters(), lr=6e-2, momentum=0.9
>>> )
>>> return [optim]
>>>
>>> model = SimSiamModel(dataloader_train_kNN)
>>> trainer = pl.Trainer()
>>> trainer.fit(
>>> model,
>>> train_dataloader=dataloader_train_ssl,
>>> val_dataloaders=dataloader_test
>>> )
>>> # you can get the peak accuracy using
>>> print(model.max_accuracy)
"""
def __init__(
self,
dataloader_kNN: DataLoader,
num_classes: int,
knn_k: int = 200,
knn_t: float = 0.1,
):
super().__init__()
self.backbone = nn.Module()
self.max_accuracy = 0.0
self.dataloader_kNN = dataloader_kNN
self.num_classes = num_classes
self.knn_k = knn_k
self.knn_t = knn_t
self._train_features: Optional[Tensor] = None
self._train_targets: Optional[Tensor] = None
self._val_predicted_labels: List[Tensor] = []
self._val_targets: List[Tensor] = []
def on_validation_epoch_start(self) -> None:
    """Builds the kNN feature bank from `dataloader_kNN`.

    Runs the backbone over the full kNN train dataloader (without gradients)
    and stores the normalized features and their targets for the kNN lookups
    performed in `validation_step`.
    """
    train_features = []
    train_targets = []
    with torch.no_grad():
        for data in self.dataloader_kNN:
            img, target, _ = data
            img = img.to(self.device)
            target = target.to(self.device)
            feature = self.backbone(img).squeeze()
            # Normalize so the dot product in knn_predict is a cosine similarity.
            feature = F.normalize(feature, dim=1)
            if (
                dist.is_available()
                and dist.is_initialized()
                and dist.get_world_size() > 0
            ):
                # gather features and targets from all processes
                # NOTE(review): this presumably relies on a project `dist`
                # wrapper whose gather() returns one tensor per rank —
                # torch.distributed.gather returns None; confirm the import.
                feature = torch.cat(dist.gather(feature), 0)
                target = torch.cat(dist.gather(target), 0)
            train_features.append(feature)
            train_targets.append(target)
    # The feature bank is stored transposed as (D, N); targets as (N,).
    self._train_features = torch.cat(train_features, dim=0).t().contiguous()
    self._train_targets = torch.cat(train_targets, dim=0).t().contiguous()
def validation_step(self, batch, batch_idx) -> None:
    """Runs a kNN prediction for one validation batch.

    Predictions are only possible once the feature bank has been built in
    `on_validation_epoch_start`. Results are buffered on the CPU and
    aggregated in `on_validation_epoch_end`.
    """
    # we can only do kNN predictions once we have a feature bank
    if self._train_features is not None and self._train_targets is not None:
        images, targets, _ = batch
        feature = self.backbone(images).squeeze()
        feature = F.normalize(feature, dim=1)
        predicted_labels = knn_predict(
            feature,
            self._train_features,
            self._train_targets,
            self.num_classes,
            self.knn_k,
            self.knn_t,
        )
        if dist.is_initialized() and dist.get_world_size() > 0:
            # gather predictions and targets from all processes
            # NOTE(review): presumably `dist` is a project wrapper whose
            # gather() returns one tensor per rank — torch.distributed.gather
            # returns None; confirm the import at the top of the file.
            predicted_labels = torch.cat(dist.gather(predicted_labels), 0)
            targets = torch.cat(dist.gather(targets), 0)
        self._val_predicted_labels.append(predicted_labels.cpu())
        self._val_targets.append(targets.cpu())
def on_validation_epoch_end(self) -> None:
    """Computes the kNN top-1 accuracy over the whole epoch and logs it.

    Also updates `max_accuracy` if the current epoch improved on it, and
    resets the per-batch buffers for the next validation epoch.
    """
    if self._val_predicted_labels and self._val_targets:
        all_predictions = torch.cat(self._val_predicted_labels, dim=0)
        all_targets = torch.cat(self._val_targets, dim=0)
        # Top-1 accuracy: the highest ranked prediction must match the target.
        num_correct = torch.eq(all_predictions[:, 0], all_targets).float().sum()
        accuracy = num_correct / len(all_targets)
        self.max_accuracy = max(self.max_accuracy, accuracy.item())
        self.log("kNN_accuracy", accuracy * 100.0, prog_bar=True)
    # Clear the buffers regardless of whether anything was logged.
    self._val_predicted_labels.clear()
    self._val_targets.clear()
| 6,475 | 38.975309 | 83 | py |
lightly | lightly-master/lightly/utils/benchmarking/knn.py | import torch
from torch import Tensor
# code for kNN prediction from here:
# https://colab.research.google.com/github/facebookresearch/moco/blob/colab-notebook/colab/moco_cifar10_demo.ipynb
def knn_predict(
    feature: Tensor,
    feature_bank: Tensor,
    feature_labels: Tensor,
    num_classes: int,
    knn_k: int = 200,
    knn_t: float = 0.1,
) -> Tensor:
    """Predicts classes with a weighted kNN vote against a feature bank.

    Commonly used to monitor the quality of self-supervised representations.
    The default parameters follow https://arxiv.org/pdf/1805.01978v1.pdf.

    Args:
        feature:
            Query features of shape (B, D).
        feature_bank:
            Database of features of shape (D, N) used for the kNN lookup.
        feature_labels:
            Labels of shape (N,) for the entries of the feature bank.
        num_classes:
            Number of classes (e.g. `10` for CIFAR-10).
        knn_k:
            Number of neighbors used for the vote.
        knn_t:
            Temperature used to reweight the similarities.

    Returns:
        Tensor of shape (B, num_classes) with class indices ordered from most
        to least likely for every query.

    Examples:
        >>> images, targets, _ = batch
        >>> feature = backbone(images).squeeze()
        >>> # we recommend to normalize the features
        >>> feature = F.normalize(feature, dim=1)
        >>> pred_labels = knn_predict(
        >>>     feature,
        >>>     feature_bank,
        >>>     targets_bank,
        >>>     num_classes=10,
        >>> )
    """
    batch_size = feature.size(0)
    # Cosine similarity between every query and every bank entry: (B, N).
    similarities = torch.mm(feature, feature_bank)
    # Keep the k most similar bank entries per query; both tensors are (B, K).
    topk_sims, topk_indices = similarities.topk(k=knn_k, dim=-1)
    # Look up the labels of the selected neighbors: (B, K).
    neighbor_labels = torch.gather(
        feature_labels.expand(batch_size, -1), dim=-1, index=topk_indices
    )
    # Temperature-scaled reweighting of the similarities.
    weights = (topk_sims / knn_t).exp()
    # One-hot encode the neighbor labels: (B*K, C).
    one_hot = torch.zeros(
        batch_size * knn_k, num_classes, device=neighbor_labels.device
    )
    one_hot = one_hot.scatter(dim=-1, index=neighbor_labels.view(-1, 1), value=1.0)
    # Weighted vote per class: (B, C).
    class_scores = torch.sum(
        one_hot.view(batch_size, -1, num_classes) * weights.unsqueeze(dim=-1),
        dim=1,
    )
    # Order classes from most to least likely.
    return class_scores.argsort(dim=-1, descending=True)
| 2,538 | 31.139241 | 114 | py |
lightly | lightly-master/lightly/utils/benchmarking/knn_classifier.py | from typing import Tuple, Union
import torch
import torch.nn.functional as F
from pytorch_lightning import LightningModule
from torch import Tensor
from torch.nn import Module
from lightly.models.utils import activate_requires_grad, deactivate_requires_grad
from lightly.utils.benchmarking import knn_predict
from lightly.utils.benchmarking.topk import mean_topk_accuracy
class KNNClassifier(LightningModule):
    def __init__(
        self,
        model: Module,
        num_classes: int,
        knn_k: int = 200,
        knn_t: float = 0.1,
        topk: Tuple[int, ...] = (1, 5),
        feature_dtype: torch.dtype = torch.float32,
    ):
        """KNN classifier for benchmarking.

        Settings based on InstDisc [0]. Code adapted from MoCo [1].

        - [0]: InstDisc, 2018, https://arxiv.org/pdf/1805.01978v1.pdf
        - [1]: MoCo, 2019, https://github.com/facebookresearch/moco

        Args:
            model:
                Model used for feature extraction. Must define a forward(images) method
                that returns a feature tensor.
            num_classes:
                Number of classes in the dataset.
            knn_k:
                Number of neighbors used for KNN search.
            knn_t:
                Temperature parameter to reweight similarities.
            topk:
                Tuple of integers defining the top-k accuracy metrics to compute.
            feature_dtype:
                Torch data type of the features used for KNN search. Reduce to float16
                for memory-efficient KNN search.

        Examples:
            >>> from pytorch_lightning import Trainer
            >>> from torch import nn
            >>> import torchvision
            >>> from lightly.models.modules import SimCLRProjectionHead
            >>>
            >>> class SimCLR(nn.Module):
            >>>     def __init__(self):
            >>>         super().__init__()
            >>>         self.backbone = torchvision.models.resnet18()
            >>>         self.backbone.fc = nn.Identity() # Ignore classification layer
            >>>         self.projection_head = SimCLRProjectionHead(512, 512, 128)
            >>>
            >>>     def forward(self, x):
            >>>         # Forward must return image features.
            >>>         features = self.backbone(x).flatten(start_dim=1)
            >>>         return features
            >>>
            >>> # Initialize a model.
            >>> model = SimCLR()
            >>>
            >>> # Wrap it with a KNNClassifier.
            >>> knn_classifier = KNNClassifier(model, num_classes=10)
            >>>
            >>> # Extract features and evaluate.
            >>> trainer = Trainer(max_epochs=1)
            >>> trainer.fit(knn_classifier, train_dataloader, val_dataloader)
        """
        super().__init__()
        self.save_hyperparameters(
            {
                "num_classes": num_classes,
                "knn_k": knn_k,
                "knn_t": knn_t,
                "topk": topk,
                "feature_dtype": str(feature_dtype),
            }
        )
        self.model = model
        self.num_classes = num_classes
        self.knn_k = knn_k
        self.knn_t = knn_t
        self.topk = topk
        self.feature_dtype = feature_dtype

        # Per-batch feature/target buffers collected during the "training"
        # epoch; merged into the tensors below at validation start.
        self._train_features = []
        self._train_targets = []
        self._train_features_tensor: Union[Tensor, None] = None
        self._train_targets_tensor: Union[Tensor, None] = None

    def training_step(self, batch, batch_idx) -> None:
        # No optimization happens here; the "training" epoch only extracts and
        # buffers normalized features for the KNN feature bank.
        images, targets = batch[0], batch[1]
        features = self.model.forward(images).flatten(start_dim=1)
        features = F.normalize(features, dim=1).to(self.feature_dtype)
        self._train_features.append(features.cpu())
        self._train_targets.append(targets.cpu())

    def validation_step(self, batch, batch_idx) -> None:
        # Skip until the feature bank has been assembled in
        # on_validation_epoch_start (e.g. during the sanity check).
        if self._train_features_tensor is None or self._train_targets_tensor is None:
            return
        images, targets = batch[0], batch[1]
        features = self.model.forward(images).flatten(start_dim=1)
        features = F.normalize(features, dim=1).to(self.feature_dtype)
        predicted_classes = knn_predict(
            feature=features,
            feature_bank=self._train_features_tensor,
            feature_labels=self._train_targets_tensor,
            num_classes=self.num_classes,
            knn_k=self.knn_k,
            knn_t=self.knn_t,
        )
        topk = mean_topk_accuracy(
            predicted_classes=predicted_classes, targets=targets, k=self.topk
        )
        log_dict = {f"val_top{k}": acc for k, acc in topk.items()}
        self.log_dict(log_dict, prog_bar=True, sync_dist=True, batch_size=len(targets))

    def on_validation_epoch_start(self) -> None:
        if self._train_features and self._train_targets:
            # Features and targets have size (world_size, batch_size, dim) and
            # (world_size, batch_size) after gather. For non-distributed training,
            # features and targets have size (batch_size, dim) and (batch_size,).
            features = self.all_gather(torch.cat(self._train_features, dim=0))
            self._train_features = []
            targets = self.all_gather(torch.cat(self._train_targets, dim=0))
            self._train_targets = []
            # Reshape to (dim, world_size * batch_size)
            features = features.flatten(end_dim=-2).t().contiguous()
            self._train_features_tensor = features.to(self.device)
            # Reshape to (world_size * batch_size,)
            targets = targets.flatten().t().contiguous()
            self._train_targets_tensor = targets.to(self.device)

    def on_fit_start(self) -> None:
        # Freeze model weights.
        deactivate_requires_grad(model=self.model)

    def on_fit_end(self) -> None:
        # Unfreeze model weights.
        activate_requires_grad(model=self.model)

    def configure_optimizers(self) -> None:
        # configure_optimizers must be implemented for PyTorch Lightning. Returning None
        # means that no optimization is performed.
        pass
| 6,214 | 38.839744 | 88 | py |
lightly | lightly-master/lightly/utils/benchmarking/linear_classifier.py | from typing import Dict, Tuple
from pytorch_lightning import LightningModule
from torch import Tensor
from torch.nn import CrossEntropyLoss, Linear, Module
from torch.optim import SGD
from lightly.models.utils import activate_requires_grad, deactivate_requires_grad
from lightly.utils.benchmarking.topk import mean_topk_accuracy
from lightly.utils.scheduler import CosineWarmupScheduler
class LinearClassifier(LightningModule):
    def __init__(
        self,
        model: Module,
        batch_size_per_device: int,
        feature_dim: int = 2048,
        num_classes: int = 1000,
        topk: Tuple[int, ...] = (1, 5),
        freeze_model: bool = False,
    ) -> None:
        """Linear classifier for benchmarking.

        Settings based on SimCLR [0].

        - [0]: https://arxiv.org/abs/2002.05709

        Args:
            model:
                Model used for feature extraction. Must define a forward(images) method
                that returns a feature tensor.
            batch_size_per_device:
                Batch size per device.
            feature_dim:
                Dimension of features returned by forward method of model.
            num_classes:
                Number of classes in the dataset.
            topk:
                Tuple of integers defining the top-k accuracy metrics to compute.
            freeze_model:
                If True, the model is frozen and only the classification head is
                trained. This corresponds to the linear eval setting. Set to False for
                finetuning.

        Examples:
            >>> from pytorch_lightning import Trainer
            >>> from torch import nn
            >>> import torchvision
            >>> from lightly.models.modules import SimCLRProjectionHead
            >>>
            >>> class SimCLR(nn.Module):
            >>>     def __init__(self):
            >>>         super().__init__()
            >>>         self.backbone = torchvision.models.resnet18()
            >>>         self.backbone.fc = nn.Identity() # Ignore classification layer
            >>>         self.projection_head = SimCLRProjectionHead(512, 512, 128)
            >>>
            >>>     def forward(self, x):
            >>>         # Forward must return image features.
            >>>         features = self.backbone(x).flatten(start_dim=1)
            >>>         return features
            >>>
            >>> # Initialize a model.
            >>> model = SimCLR()
            >>>
            >>> # Wrap it with a LinearClassifier.
            >>> linear_classifier = LinearClassifier(
            >>>     model,
            >>>     batch_size_per_device=256,
            >>>     num_classes=10,
            >>>     freeze_model=True, # linear evaluation, set to False for finetune
            >>> )
            >>>
            >>> # Train the linear classifier.
            >>> trainer = Trainer(max_epochs=90)
            >>> trainer.fit(linear_classifier, train_dataloader, val_dataloader)
        """
        super().__init__()
        self.save_hyperparameters(ignore="model")
        self.model = model
        self.batch_size_per_device = batch_size_per_device
        self.feature_dim = feature_dim
        self.num_classes = num_classes
        self.topk = topk
        self.freeze_model = freeze_model

        self.classification_head = Linear(feature_dim, num_classes)
        self.criterion = CrossEntropyLoss()

    def forward(self, images: Tensor) -> Tensor:
        # Flatten so the head always sees a (batch, feature_dim) input.
        features = self.model.forward(images).flatten(start_dim=1)
        return self.classification_head(features)

    def shared_step(self, batch, batch_idx) -> Tuple[Tensor, Dict[int, Tensor]]:
        # Common logic for training and validation: loss plus top-k accuracies.
        images, targets = batch[0], batch[1]
        predictions = self.forward(images)
        loss = self.criterion(predictions, targets)
        _, predicted_labels = predictions.topk(max(self.topk))
        topk = mean_topk_accuracy(predicted_labels, targets, k=self.topk)
        return loss, topk

    def training_step(self, batch, batch_idx) -> Tensor:
        loss, topk = self.shared_step(batch=batch, batch_idx=batch_idx)
        batch_size = len(batch[1])
        log_dict = {f"train_top{k}": acc for k, acc in topk.items()}
        self.log(
            "train_loss", loss, prog_bar=True, sync_dist=True, batch_size=batch_size
        )
        self.log_dict(log_dict, sync_dist=True, batch_size=batch_size)
        return loss

    def validation_step(self, batch, batch_idx) -> Tensor:
        loss, topk = self.shared_step(batch=batch, batch_idx=batch_idx)
        batch_size = len(batch[1])
        log_dict = {f"val_top{k}": acc for k, acc in topk.items()}
        self.log("val_loss", loss, prog_bar=True, sync_dist=True, batch_size=batch_size)
        self.log_dict(log_dict, prog_bar=True, sync_dist=True, batch_size=batch_size)
        return loss

    def configure_optimizers(self):
        # Only the head is optimized in the linear eval setting; finetuning
        # additionally optimizes the backbone parameters.
        parameters = list(self.classification_head.parameters())
        if not self.freeze_model:
            parameters += self.model.parameters()
        optimizer = SGD(
            parameters,
            # Linear scaling rule: lr = 0.1 * total batch size / 256.
            lr=0.1 * self.batch_size_per_device * self.trainer.world_size / 256,
            momentum=0.9,
            weight_decay=0.0,
        )
        scheduler = {
            "scheduler": CosineWarmupScheduler(
                optimizer=optimizer,
                warmup_epochs=0,
                max_epochs=self.trainer.estimated_stepping_batches,
            ),
            # Step the cosine schedule every optimization step, not every epoch.
            "interval": "step",
        }
        return [optimizer], [scheduler]

    def on_fit_start(self) -> None:
        # Freeze model weights.
        if self.freeze_model:
            deactivate_requires_grad(model=self.model)

    def on_fit_end(self) -> None:
        # Unfreeze model weights.
        if self.freeze_model:
            activate_requires_grad(model=self.model)
| 5,896 | 37.292208 | 88 | py |
lightly | lightly-master/lightly/utils/benchmarking/metric_callback.py | from typing import Dict, List
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import Callback
from torch import Tensor
class MetricCallback(Callback):
    """Records the metrics logged by a LightningModule after every epoch.

    Attributes:
        train_metrics:
            Maps a metric name to the list of values logged at the end of each
            training epoch.
        val_metrics:
            Maps a metric name to the list of values logged at the end of each
            validation epoch.

    Example::

        >>> from lightly.utils.benchmarking import MetricCallback
        >>> from pytorch_lightning import LightningModule, Trainer
        >>>
        >>> class Model(LightningModule):
        >>>     def training_step(self, batch, batch_idx):
        >>>         ...
        >>>         self.log("train_acc", acc)
        >>>         ...
        >>>
        >>>     def validation_step(self, batch, batch_idx):
        >>>         ...
        >>>         self.log("val_acc", acc)
        >>>         ...
        >>>
        >>> metric_callback = MetricCallback()
        >>> trainer = Trainer(callbacks=[metric_callback], max_epochs=10)
        >>> trainer.fit(Model(), train_dataloader, val_dataloader)
        >>>
        >>> max_train_acc = max(metric_callback.train_metrics["train_acc"])
        >>> max_val_acc = max(metric_callback.val_metrics["val_acc"])
    """

    def __init__(self):
        super().__init__()
        self.train_metrics: Dict[str, List[float]] = {}
        self.val_metrics: Dict[str, List[float]] = {}

    def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
        # Metrics produced during the sanity check are not representative.
        if trainer.sanity_checking:
            return
        self._append_metrics(metrics_dict=self.train_metrics, trainer=trainer)

    def on_validation_epoch_end(
        self, trainer: Trainer, pl_module: LightningModule
    ) -> None:
        if trainer.sanity_checking:
            return
        self._append_metrics(metrics_dict=self.val_metrics, trainer=trainer)

    def _append_metrics(
        self, metrics_dict: Dict[str, List[float]], trainer: Trainer
    ) -> None:
        # Store only plain scalars; multi-element tensors are ignored.
        for name, value in trainer.callback_metrics.items():
            is_scalar_tensor = isinstance(value, Tensor) and value.numel() == 1
            if isinstance(value, float) or is_scalar_tensor:
                metrics_dict.setdefault(name, []).append(float(value))
| 2,432 | 35.863636 | 88 | py |
lightly | lightly-master/lightly/utils/benchmarking/online_linear_classifier.py | from typing import Dict, Tuple
from pytorch_lightning import LightningModule
from torch import Tensor
from torch.nn import CrossEntropyLoss, Linear
from lightly.utils.benchmarking.topk import mean_topk_accuracy
class OnlineLinearClassifier(LightningModule):
    """Linear classification head trained online on detached features.

    The head receives features from a (typically self-supervised) model and
    learns to classify them without propagating gradients back into the
    feature extractor.
    """

    def __init__(
        self,
        feature_dim: int = 2048,
        num_classes: int = 1000,
        topk: Tuple[int, ...] = (1, 5),
    ) -> None:
        super().__init__()
        self.feature_dim = feature_dim
        self.num_classes = num_classes
        self.topk = topk
        self.classification_head = Linear(feature_dim, num_classes)
        self.criterion = CrossEntropyLoss()

    def forward(self, x: Tensor) -> Tensor:
        # Detach so that no gradients flow back into the feature extractor.
        return self.classification_head(x.detach().flatten(start_dim=1))

    def shared_step(self, batch, batch_idx) -> Tuple[Tensor, Dict[int, Tensor]]:
        # Common logic for training and validation: loss plus top-k accuracies.
        features, targets = batch[0], batch[1]
        logits = self.forward(features)
        loss = self.criterion(logits, targets)
        _, top_classes = logits.topk(max(self.topk))
        accuracies = mean_topk_accuracy(top_classes, targets, k=self.topk)
        return loss, accuracies

    def training_step(self, batch, batch_idx) -> Tuple[Tensor, Dict[str, Tensor]]:
        loss, accuracies = self.shared_step(batch=batch, batch_idx=batch_idx)
        metrics = {"train_online_cls_loss": loss}
        for top_k, accuracy in accuracies.items():
            metrics[f"train_online_cls_top{top_k}"] = accuracy
        return loss, metrics

    def validation_step(self, batch, batch_idx) -> Tuple[Tensor, Dict[str, Tensor]]:
        loss, accuracies = self.shared_step(batch=batch, batch_idx=batch_idx)
        metrics = {"val_online_cls_loss": loss}
        for top_k, accuracy in accuracies.items():
            metrics[f"val_online_cls_top{top_k}"] = accuracy
        return loss, metrics
| 1,811 | 37.553191 | 85 | py |
lightly | lightly-master/lightly/utils/benchmarking/topk.py | from typing import Dict, Sequence
import torch
from torch import Tensor
def mean_topk_accuracy(
    predicted_classes: Tensor, targets: Tensor, k: Sequence[int]
) -> Dict[int, Tensor]:
    """Computes the mean top-k accuracy over the batch for every requested k.

    Args:
        predicted_classes:
            Tensor of shape (batch_size, num_classes) with the predicted classes
            sorted in descending order of confidence.
        targets:
            Tensor of shape (batch_size,) containing the target classes.
        k:
            Values of k for which the accuracy should be computed.

    Returns:
        Dictionary mapping each k to its mean accuracy. For example for
        k=(1, 5) the dictionary could look like this: {1: 0.4, 5: 0.6}.
    """
    result: Dict[int, Tensor] = {}
    # Shape (batch_size, 1) so it broadcasts against the top-k columns.
    expanded_targets = targets.unsqueeze(1)
    batch_size = expanded_targets.shape[0]
    with torch.no_grad():
        for top_k in k:
            hits = torch.eq(predicted_classes[:, :top_k], expanded_targets)
            result[top_k] = hits.float().sum() / batch_size
    return result
| 1,138 | 31.542857 | 87 | py |
lightly | lightly-master/lightly/utils/cropping/crop_image_by_bounding_boxes.py | import os.path
import warnings
from pathlib import Path
from typing import List
from PIL import Image
from tqdm import tqdm
from lightly.data import LightlyDataset
from lightly.utils.bounding_box import BoundingBox
def crop_dataset_by_bounding_boxes_and_save(
    dataset: LightlyDataset,
    output_dir: str,
    bounding_boxes_list_list: List[List[BoundingBox]],
    class_indices_list_list: List[List[int]],
    # NOTE(review): the default None makes this implicitly Optional[List[str]];
    # the annotation should be Optional[List[str]].
    class_names: List[str] = None,
) -> List[List[str]]:
    """Crops all images in a dataset by the bounding boxes and saves them in the output dir

    Args:
        dataset:
            The dataset with the images to be cropped. Must contain M images.
        output_dir:
            The output directory to save the cropped images to.
        bounding_boxes_list_list:
            The bounding boxes of the detections for each image. Must have M sublists, one for each image.
            Each sublist contains the bounding boxes for each detection, thus N_m elements.
        class_indices_list_list:
            The object class ids of the detections for each image. Must have M sublists, one for each image.
            Each sublist contains the class index for each detection, thus N_m elements.
        class_names:
            The names of the classes, used to map the class id to the class name.

    Returns:
        The filepaths to all saved cropped images. Has M sublists, one for each image.
        Each sublist contains the filepath of the crop of each detection, thus N_m elements.
    """
    filenames_images = dataset.get_filenames()
    if len(filenames_images) != len(bounding_boxes_list_list) or len(
        filenames_images
    ) != len(class_indices_list_list):
        raise ValueError(
            "There must be one bounding box and class index list for each image in the datasets,"
            "but the lengths dont align."
        )

    cropped_image_filepath_list_list: List[List[str]] = []

    print(f"Cropping objects out of {len(filenames_images)} images...")
    for filename_image, class_indices, bounding_boxes in tqdm(
        zip(filenames_images, class_indices_list_list, bounding_boxes_list_list)
    ):
        # Per-image list mismatches are skipped with a warning instead of
        # aborting the whole run.
        if not len(class_indices) == len(bounding_boxes):
            warnings.warn(
                UserWarning(
                    f"Length of class indices ({len(class_indices)} does not equal length of bounding boxes"
                    f"({len(bounding_boxes)}. This is an error in the input arguments. "
                    f"Skipping this image {filename_image}."
                )
            )
            continue

        filepath_image = dataset.get_filepath_from_filename(filename_image)
        filepath_image_base, image_extension = os.path.splitext(filepath_image)
        # One output subdirectory per source image, named after the image
        # without its extension.
        filepath_out_dir = os.path.join(output_dir, filename_image).replace(
            image_extension, ""
        )
        Path(filepath_out_dir).mkdir(parents=True, exist_ok=True)

        image = Image.open(filepath_image)

        cropped_images_filepaths = []
        # For every image, crop out multiple cropped images, one for each
        # bounding box
        for index, (class_index, bbox) in enumerate(
            (zip(class_indices, bounding_boxes))
        ):
            # determine the filename and filepath of the cropped image
            if class_names:
                class_name = class_names[class_index]
            else:
                class_name = f"class{class_index}"
            cropped_image_last_filename = f"{index}_{class_name}{image_extension}"
            cropped_image_filepath = os.path.join(
                filepath_out_dir, cropped_image_last_filename
            )

            # crop out the image and save it; the bounding box coordinates are
            # relative, so scale them by the image size first
            w, h = image.size
            crop_box = (w * bbox.x0, h * bbox.y0, w * bbox.x1, h * bbox.y1)
            crop_box = tuple(int(i) for i in crop_box)
            cropped_image = image.crop(crop_box)
            cropped_image.save(cropped_image_filepath)

            # add the filename of the cropped image to the corresponding list
            cropped_image_filename: str = os.path.join(
                filename_image.replace(image_extension, ""), cropped_image_last_filename
            )
            cropped_images_filepaths.append(cropped_image_filename)

        cropped_image_filepath_list_list.append(cropped_images_filepaths)

    return cropped_image_filepath_list_list
| 4,398 | 39.357798 | 108 | py |
lightly | lightly-master/lightly/utils/cropping/read_yolo_label_file.py | from typing import List, Tuple
from lightly.utils.bounding_box import BoundingBox
def read_yolo_label_file(
    filepath: str, padding: float, separator: str = " "
) -> Tuple[List[int], List[BoundingBox]]:
    """Parses a label file in the YOLO format.

    Args:
        filepath:
            Path to the YOLO label file, usually a .txt file.
        padding:
            Relative passepartout / padding added around each bounding box.
        separator:
            Character separating the 5 values (class id, x, y, w, h) per row.

    Returns:
        Two lists, each with one element per row in the label file:
        the class indices and the bounding boxes.
    """
    with open(filepath, "r") as label_file:
        rows = label_file.readlines()

    class_indices: List[int] = []
    bounding_boxes: List[BoundingBox] = []
    for row in rows:
        class_id, x_norm, y_norm, w_norm, h_norm = (
            float(entry) for entry in row.split(sep=separator)
        )
        class_indices.append(int(class_id))
        # Enlarge the box by the relative padding before converting it.
        box = BoundingBox.from_yolo_label(
            x_norm, y_norm, w_norm * (1 + padding), h_norm * (1 + padding)
        )
        bounding_boxes.append(box)
    return class_indices, bounding_boxes
| 1,269 | 29.97561 | 106 | py |
lightly | lightly-master/tests/REAMDE.md | # How to write tests for the API
There are three different locations to write tests for the API, each with its
own advantages and disadvantages:
1. In tests/api_workflow_client:
This is for testing the api_workflow_client directly with the ability to configure its mocked version fully.
Furthermore, you have a (partly) stateful api_workflow_client.
2. In tests/cli:
This is for testing the cli commands. However, it will use a new api_workflow_client
for every new cli command. It does not allow configuring the mocked api_workflow_client.
3. In tests/UNMOCKED_end2end_tests:
This runs tests against the live API. | 619 | 46.692308 | 108 | md |
lightly | lightly-master/tests/__init__.py | 0 | 0 | 0 | py | |
lightly | lightly-master/tests/conftest.py | # content of conftest.py
import os
from unittest import mock
import pytest
def pytest_addoption(parser):
    """Registers the custom ``--runslow`` command line flag with pytest."""
    parser.addoption(
        "--runslow",
        action="store_true",
        default=False,
        help="run slow tests",
    )
def pytest_configure(config):
    """Pytest configuration hook; runs before any tests are collected or run.

    For docs see:
    https://docs.pytest.org/en/7.1.x/reference/reference.html#pytest.hookspec.pytest_configure
    """
    # Register the marker used by pytest_collection_modifyitems.
    config.addinivalue_line("markers", "slow: mark test as slow to run")
    # Skip lightly's version check on import (see lightly/__init__.py). The
    # check cannot be mocked in __init__.py because it already runs while
    # pytest collects the tests, before any fixtures are applied.
    os.environ["LIGHTLY_DID_VERSION_CHECK"] = "True"
    # Point the client at a dummy URL so no requests hit the real API.
    os.environ["LIGHTLY_SERVER_LOCATION"] = "https://dummy-url"
def pytest_collection_modifyitems(config, items):
    """Skips tests marked as slow unless ``--runslow`` was given."""
    if config.getoption("--runslow"):
        # Slow tests were explicitly requested; leave the collection untouched.
        return
    skip_marker = pytest.mark.skip(reason="need --runslow option to run")
    slow_items = (item for item in items if "slow" in item.keywords)
    for slow_item in slow_items:
        slow_item.add_marker(skip_marker)
@pytest.fixture(scope="session", autouse=True)
def mock_versioning_api():
    """Fixture that is applied to all tests and mocks the versioning API.

    This is necessary because every time an ApiWorkflowClient instance is created, a
    call to the versioning API is made. This fixture makes sure that these calls
    succeed while not actually sending any requests to the API.

    It mocks:
    - VersioningApi.get_latest_pip_version to always return the current version. This
      avoids any errors/warnings related to not using the latest version.
    - VersioningApi.get_minimum_compatible_pip_version to always return 1.0.0 which
      should be compatible with all future versions.
    """

    # Replacement for get_latest_pip_version: echo the version that is asked
    # about, so the installed version always counts as "latest".
    def mock_get_latest_pip_version(current_version: str) -> str:
        return current_version

    # NOTE(guarin, 2/6/23): Cannot use pytest mocker fixture here because it has not
    # a "session" scope and it is not possible to use a fixture that has a tighter
    # scope inside a fixture with a wider scope.
    with mock.patch(
        "lightly.api.version_checking.VersioningApi.get_latest_pip_version",
        new=mock_get_latest_pip_version,
    ), mock.patch(
        "lightly.api.version_checking.VersioningApi.get_minimum_compatible_pip_version",
        return_value="1.0.0",
    ):
        # Keep the patches active for the whole test session.
        yield
| 2,681 | 35.739726 | 94 | py |
lightly | lightly-master/tests/UNMOCKED_end2end_tests/README.md | This directory contains scripts to test the Python package against a server.
## Testing the Server API with CLI commands and active learning
You only need an account on the server.
Once you have a token from our production server `https://app.lightly.ai`, you can run:
```bash
cd ../../../lightly # ensure you are in the top directory
pip uninstall lightly -y
pip install .
bash tests/UNMOCKED_end2end_tests/run_all_unmocked_tests.sh LIGHTLY_TOKEN
```
## Testing the Server API with CLI commands
You only need an account on the server and a dataset.
Once you have a token from our production server `https://app.lightly.ai`, you can run:
```bash
bash test_api_on_branch.sh path/to/dataset LIGHTLY_TOKEN
```
## Testing the API latency
This needs a token, but no dataset
```bash
LIGHTLY_TOKEN="MY_TOKEN" && python tests/UNMOCKED_end2end_tests/test_api_latency.py
```
## Testing the upload speed
Use the pycharm profile with yappi to run the function [benchmark_upload.py:benchmark_upload()](benchmark_upload.py).
You can use the following script for example: [call_benchmark_upload](call_benchmark_upload.py) | 1,112 | 37.37931 | 118 | md |
lightly | lightly-master/tests/UNMOCKED_end2end_tests/create_custom_metadata_from_input_dir.py | import sys
from lightly.data import LightlyDataset
from lightly.utils.io import save_custom_metadata
if __name__ == "__main__":
    # Expect exactly two arguments: the input directory and the output file.
    if len(sys.argv) != 1 + 2:
        raise ValueError(
            "ERROR in number of command line arguments, must be 2."
            "Example: python create_custom_metadata_from_input_dir.py input_dir metadata_filename"
        )
    input_dir, metadata_filename = sys.argv[1], sys.argv[2]

    dataset = LightlyDataset(input_dir)

    # Pair every filename in the dataset with its index as custom metadata.
    custom_metadata = [
        (filename, {"index": index})
        for index, filename in enumerate(dataset.get_filenames())
    ]

    save_custom_metadata(metadata_filename, custom_metadata)
| 783 | 31.666667 | 98 | py |
lightly | lightly-master/tests/UNMOCKED_end2end_tests/delete_datasets_test_unmocked_cli.py | import sys
from lightly.api import ApiWorkflowClient
if __name__ == "__main__":
    # Expect exactly three arguments: dataset count, API token, and timestamp.
    if len(sys.argv) != 1 + 3:
        raise ValueError(
            "ERROR in number of command line arguments, must be 3."
            "Example: python delete_datasets_test_unmocked_cli.py 6 LIGHTLY_TOKEN 2022-09-29-13-41-24"
        )
    num_datasets, token, date_time = sys.argv[1], sys.argv[2], sys.argv[3]

    client = ApiWorkflowClient(token=token)
    # Dataset names are 1-based: test_unmocked_cli_1_<ts> ... test_unmocked_cli_N_<ts>.
    for dataset_index in range(1, int(num_datasets) + 1):
        client.set_dataset_id_by_name(f"test_unmocked_cli_{dataset_index}_{date_time}")
        client.delete_dataset_by_id(client.dataset_id)
| 750 | 34.761905 | 102 | py |
lightly | lightly-master/tests/UNMOCKED_end2end_tests/run_all_unmocked_tests.sh | #!/bin/bash
set -e

# Read the API token from the first positional argument.
# Expansions are quoted throughout to avoid word splitting and globbing.
export LIGHTLY_TOKEN="$1"
[[ -z "$LIGHTLY_TOKEN" ]] && { echo "Error: token is empty" ; exit 1; }
echo "############################### token: ${LIGHTLY_TOKEN}"

# Timestamp for uniquely named test artifacts (currently unused in this script).
DATE_TIME=$(date +%Y-%m-%d-%H-%M-%S)

echo "############################### Download the clothing_dataset_small"
DIR_DATASET=clothing_dataset_small
if [ -d "$DIR_DATASET" ]; then
    echo "Skipping download of dataset, it already exists."
else
    git clone https://github.com/alexeygrigorev/clothing-dataset-small "$DIR_DATASET"
fi

INPUT_DIR="${DIR_DATASET}/test/dress"
CUSTOM_METADATA_FILENAME="${DIR_DATASET}/custom_metadata.json"
python tests/UNMOCKED_end2end_tests/create_custom_metadata_from_input_dir.py "$INPUT_DIR" "$CUSTOM_METADATA_FILENAME"

# Run the tests
echo "############################### Test 1"
lightly-magic input_dir="$INPUT_DIR" trainer.max_epochs=0

echo "############################### Test 2"
lightly-magic input_dir="$INPUT_DIR" trainer.max_epochs=1

echo "############################### Delete dataset again"
rm -rf "$DIR_DATASET"
| 1,045 | 31.6875 | 113 | sh |
lightly | lightly-master/tests/UNMOCKED_end2end_tests/scripts_for_reproducing_problems/test_api_latency.py | import os
import time
import numpy as np
from tqdm import tqdm
from lightly.api import ApiWorkflowClient
from lightly.openapi_generated.swagger_client import ApiClient, Configuration, QuotaApi
if __name__ == "__main__":
    # Measure the round-trip latency of a cheap, authenticated API call.
    token = os.getenv("LIGHTLY_TOKEN")
    api_client = ApiWorkflowClient(token=token).api_client
    quota_api = QuotaApi(api_client)

    n_iters = 200
    latencies = np.zeros(n_iters)
    for i in tqdm(range(n_iters)):
        start = time.time()
        quota_api.get_quota_maximum_dataset_size()
        latencies[i] = time.time() - start

    def format_latency(latency: float) -> str:
        """Formats a latency in seconds as a millisecond string, e.g. '12.3ms'."""
        return f"{latency*1000:.1f}ms"

    # Removed dead leftover statement `values = [("min")]` (it only bound an
    # unused string) and the unused `response` binding below.
    print(
        f"Latencies: min: {format_latency(np.min(latencies))}, mean: {format_latency(np.mean(latencies))}, max: {format_latency(np.max(latencies))}"
    )

    # Compare against a plain network round trip for reference.
    print("\nPINGING TO GOOGLE")
    os.system("ping -c 1 " + "google.com")
| 955 | 27.969697 | 148 | py |
lightly | lightly-master/tests/api/benchmark_video_download.py | import time
import unittest
import av
import numpy as np
from tqdm import tqdm
from lightly.api.download import (
download_all_video_frames,
download_video_frame,
download_video_frames_at_timestamps,
)
@unittest.skip("Only used for benchmarks")
class BenchmarkDownloadVideoFrames(unittest.TestCase):
    """Benchmarks for the different ways of downloading video frames.

    some timings: https://github.com/lightly-ai/lightly/pull/754
    """

    @classmethod
    def setUpClass(cls) -> None:
        # Public ~12 minute / ~100 MB test video streamed over HTTP.
        cls.video_url_12min_100mb = "https://mediandr-a.akamaihd.net/progressive/2018/0912/TV-20180912-1628-0000.ln.mp4"
        with av.open(cls.video_url_12min_100mb) as container:
            stream = container.streams.video[0]
            duration = stream.duration
        # This video has its timestamps 0-based
        cls.timestamps = np.linspace(0, duration, num=1000).astype(int).tolist()

    def setUp(self) -> None:
        # Wall-clock start for the per-test timing printed in tearDown.
        self.start_time = time.time()

    def test_download_full(self):
        # Decode every frame of the video sequentially.
        all_video_frames = download_all_video_frames(self.video_url_12min_100mb)
        for i, frame in enumerate(tqdm(all_video_frames)):
            pass

    # Takes very long for many frames, but is very quick for few frames.
    # The reason is that
    # - every function call has quite some overhead
    # - as many frames are skipped by the seek, this only reads a small number
    #   of frames per function call.
    def test_download_at_timestamps_for_loop(self):
        for timestamp in tqdm(self.timestamps):
            frame = download_video_frame(self.video_url_12min_100mb, timestamp)

    def test_download_at_timestamps(self):
        # Batched variant: one generator over all requested timestamps.
        frames = download_video_frames_at_timestamps(
            self.video_url_12min_100mb, self.timestamps
        )
        frames = list(tqdm(frames, total=len(self.timestamps)))

    # Takes long as it downloads the whole video first.
    # Takes long, as it accesses the frames even at random locations, similar
    # to downloading specific frames in a for loop.
    def test_download_at_indices_decord(self):
        """
        See https://github.com/dmlc/decord/issues/199
        """
        import decord

        vr = decord.VideoReader(self.video_url_12min_100mb)
        decord.bridge.set_bridge("torch")
        print(f"Took {time.time() - self.start_time}s for creating the video reader.")
        frames = vr.get_batch(list(range(0, 18000, 18)))

    def tearDown(self) -> None:
        print(f"Took {time.time()-self.start_time}s")
| 2,441 | 34.911765 | 120 | py |
lightly | lightly-master/tests/api/test_BitMask.py | import unittest
from copy import deepcopy
from random import randint, random, seed
from lightly.api.bitmask import BitMask
# NOTE(review): `N` appears unused in this module — presumably a leftover
# constant; confirm before removing.
N = 10
class TestBitMask(unittest.TestCase):
    """Unit tests for ``lightly.api.bitmask.BitMask``.

    Bitstrings use Python's ``0b...`` notation; bit ``k`` is the k-th least
    significant bit, i.e. the *rightmost* character of the bitstring is bit 0.
    """

    # NOTE(review): unittest looks for `setUp` (capital U); this `setup` method
    # is never invoked by the test runner and its `psuccess` parameter is
    # unused — confirm intent before removing.
    def setup(self, psuccess=1.0):
        pass

    def test_get_and_set(self):
        # "0b11110000": bits 4-7 set, bits 0-3 unset.
        mask = BitMask.from_bin("0b11110000")
        self.assertFalse(mask.get_kth_bit(2))
        mask.set_kth_bit(2)
        self.assertTrue(mask.get_kth_bit(2))
        self.assertTrue(mask.get_kth_bit(4))
        mask.unset_kth_bit(4)
        self.assertFalse(mask.get_kth_bit(4))

    def test_large_bitmasks(self):
        # Round-trip a bitstring far larger than any machine word.
        bitstring = "0b" + "1" * 5678
        mask = BitMask.from_bin(bitstring)
        mask_as_bitstring = mask.to_bin()
        self.assertEqual(mask_as_bitstring, bitstring)

    def test_bitmask_from_length(self):
        # from_length(n) produces a mask with the n lowest bits set.
        length = 4
        mask = BitMask.from_length(length)
        self.assertEqual(mask.to_bin(), "0b1111")

    def test_get_and_set_outside_of_range(self):
        # Bits beyond the current width read as 0 and can be set on demand.
        mask = BitMask.from_bin("0b11110000")
        self.assertFalse(mask.get_kth_bit(100))
        mask.set_kth_bit(100)
        self.assertTrue(mask.get_kth_bit(100))

    def test_inverse(self):
        # TODO: proper implementation
        # NOTE(review): everything below the `return` is dead code; it also
        # calls `invert()` without the length argument used elsewhere
        # (see test_invert) — verify the intended API before reviving it.
        return
        x = int("0b11110000", 2)
        y = int("0b00001111", 2)
        mask = BitMask(x)
        mask.invert()
        self.assertEqual(mask.x, y)
        x = int("0b010101010101010101", 2)
        y = int("0b101010101010101010", 2)
        mask = BitMask(x)
        mask.invert()
        self.assertEqual(mask.x, y)

    def test_store_and_retrieve(self):
        # Serialize to hex and bin and make sure both round-trip losslessly.
        x = int("0b01010100100100100100100010010100100100101001001010101010", 2)
        mask = BitMask(x)
        mask.set_kth_bit(11)
        mask.set_kth_bit(22)
        mask.set_kth_bit(33)
        mask.set_kth_bit(44)
        mask.set_kth_bit(55)
        mask.set_kth_bit(66)
        mask.set_kth_bit(77)
        mask.set_kth_bit(88)
        mask.set_kth_bit(99)
        somewhere = mask.to_hex()
        somewhere_else = mask.to_bin()
        mask_somewhere = BitMask.from_hex(somewhere)
        mask_somewhere_else = BitMask.from_bin(somewhere_else)
        self.assertEqual(mask.x, mask_somewhere.x)
        self.assertEqual(mask.x, mask_somewhere_else.x)

    def test_union(self):
        # union mutates mask_a in place (bitwise OR).
        mask_a = BitMask.from_bin("0b001")
        mask_b = BitMask.from_bin("0b100")
        mask_a.union(mask_b)
        self.assertEqual(mask_a.x, int("0b101", 2))

    def test_intersection(self):
        # intersection mutates mask_a in place (bitwise AND).
        mask_a = BitMask.from_bin("0b101")
        mask_b = BitMask.from_bin("0b100")
        mask_a.intersection(mask_b)
        self.assertEqual(mask_a.x, int("0b100", 2))

    def assert_difference(self, bistring_1: str, bitstring_2: str, target: str):
        # Helper: assert that (bistring_1 - bitstring_2) == target.
        mask_a = BitMask.from_bin(bistring_1)
        mask_b = BitMask.from_bin(bitstring_2)
        mask_a.difference(mask_b)
        self.assertEqual(mask_a.x, int(target, 2))

    def test_differences(self):
        self.assert_difference("0b101", "0b001", "0b100")
        self.assert_difference("0b0111", "0b1100", "0b0011")
        self.assert_difference("0b10111", "0b01100", "0b10011")

    def random_bitstring(self, length: int):
        # Helper: a random "0b..." string of the given number of bits.
        bitsting = "0b"
        for i in range(length):
            bitsting += str(randint(0, 1))
        return bitsting

    def test_difference_random(self):
        # Fuzz the difference operation against a per-character reference.
        seed(42)
        for rep in range(10):
            for string_length in range(1, 100, 10):
                bitstring_1 = self.random_bitstring(string_length)
                bitstring_2 = self.random_bitstring(string_length)
                # A result bit is 1 only where bit_1 is set and bit_2 is not.
                target = "0b"
                for bit_1, bit_2 in zip(bitstring_1[2:], bitstring_2[2:]):
                    if bit_1 == "1" and bit_2 == "0":
                        target += "1"
                    else:
                        target += "0"
                self.assert_difference(bitstring_1, bitstring_2, target)

    def test_operator_minus(self):
        # `-` must behave like difference() but return a new mask.
        mask_a = BitMask.from_bin("0b10111")
        mask_a_old = deepcopy(mask_a)
        mask_b = BitMask.from_bin("0b01100")
        mask_target = BitMask.from_bin("0b10011")
        diff = mask_a - mask_b
        self.assertEqual(diff, mask_target)
        self.assertEqual(
            mask_a_old, mask_a
        )  # make sure the original mask is unchanged.

    def test_equal(self):
        mask_a = BitMask.from_bin("0b101")
        mask_b = BitMask.from_bin("0b101")
        self.assertEqual(mask_a, mask_b)

    def test_masked_select_from_list(self):
        # Build a mask matching the nonzero entries of a random 0/1 list; the
        # trailing [0, 1] guarantees both selections below are non-empty.
        n = 1000
        list_ = [randint(0, 1) for _ in range(n - 2)] + [0, 1]
        mask = BitMask.from_length(n)
        for index, item_ in enumerate(list_):
            if item_ == 0:
                mask.unset_kth_bit(index)
            else:
                mask.set_kth_bit(index)
        all_ones = mask.masked_select_from_list(list_)
        mask.invert(n)
        all_zeros = mask.masked_select_from_list(list_)
        self.assertGreater(len(all_ones), 0)
        self.assertGreater(len(all_zeros), 0)
        self.assertTrue(all([item_ > 0 for item_ in all_ones]))
        self.assertTrue(all([item_ == 0 for item_ in all_zeros]))

    def test_masked_select_from_list_example(self):
        # Bit k selects list element k (mask is read right-to-left).
        list_ = [1, 2, 3, 4, 5, 6]
        mask = BitMask.from_bin("0b001101")  # expected result is [1, 3, 4]
        selected = mask.masked_select_from_list(list_)
        self.assertListEqual(selected, [1, 3, 4])

    def test_invert(self):
        # get random bitstring
        length = 10
        bitstring = self.random_bitstring(10)
        # get inverse
        mask = BitMask.from_bin(bitstring)
        mask.invert(length)
        inverted = mask.to_bin()
        # remove 0b
        inverted = inverted[2:]
        bitstring = bitstring[2:]
        # Compare bit-by-bit from the least significant end; every bit must
        # have flipped.
        for i in range(min(len(bitstring), len(inverted))):
            if bitstring[-i - 1] == "0":
                self.assertEqual(inverted[-i - 1], "1")
            else:
                self.assertEqual(inverted[-i - 1], "0")

    def test_nonzero_bits(self):
        # to_indices() must report the set bits in ascending order, even for
        # very sparse masks with large indices.
        mask = BitMask.from_bin("0b0")
        indices = [100, 1000, 10_000, 100_000]
        self.assertEqual(mask.x, 0)
        for index in indices:
            mask.set_kth_bit(index)
        self.assertGreaterEqual(mask.x, 0)
        also_indices = mask.to_indices()
        for i, j in zip(indices, also_indices):
            self.assertEqual(i, j)
| 6,372 | 31.515306 | 80 | py |
lightly | lightly-master/tests/api/test_download.py | import json
import os
import sys
import tempfile
import unittest
import warnings
from io import BytesIO
from unittest import mock
import numpy as np
import tqdm
from PIL import Image
try:
import av
AV_AVAILABLE = True
except ImportError:
AV_AVAILABLE = False
# mock requests module so that files are read from
# disk instead of loading them from a remote url
class MockedRequestsModule:
    """Stand-in for the top-level ``requests`` module used by the tests.

    Instead of fetching ``url`` over the network, ``get`` interprets it as a
    local file path and wraps it in a :class:`MockedResponse`.
    """

    def get(self, url, stream=None, *args, **kwargs):
        response = MockedResponse(url)
        return response

    class Session:
        """Stand-in for ``requests.Session`` with the same disk-backed ``get``."""

        def get(self, url, stream=None, *args, **kwargs):
            response = MockedResponse(url)
            return response
class MockedRequestsModulePartialResponse:
    """Stand-in for ``requests`` whose responses truncate the first download.

    Simulates a connection that breaks mid-transfer: the first response serves
    only the first 1024 bytes (see :class:`MockedResponsePartialStream`);
    subsequent responses serve the full file, so a retry succeeds.
    """

    # NOTE: the original class also defined a stray `raise_for_status` method
    # here. It modeled the *module* (which has no such attribute), so the
    # method was unreachable dead code and has been removed; the response
    # object itself still provides `raise_for_status`.

    def get(self, url, stream=None, *args, **kwargs):
        return MockedResponsePartialStream(url)

    class Session:
        """Stand-in for ``requests.Session`` with the same truncating ``get``."""

        def get(self, url, stream=None, *args, **kwargs):
            return MockedResponsePartialStream(url)
class MockedResponse:
    """Minimal ``requests.Response`` replacement backed by a file on disk.

    The ``url`` handed to the mocked ``requests.get`` is treated as a local
    file path; ``raw`` and ``json`` read that file instead of a network body.
    """

    def __init__(self, raw):
        # Path of the file that plays the role of the remote resource.
        self._raw = raw

    @property
    def raw(self):
        # A freshly opened binary file handle stands in for the byte stream
        # of the remote resource (the caller is responsible for closing it).
        return open(self._raw, "rb")

    @property
    def status_code(self):
        # Every mocked request "succeeds".
        return 200

    def raise_for_status(self):
        # The status is always 200, so there is never anything to raise.
        return

    def json(self):
        # Parse the backing file as JSON instead of decoding a response body.
        handle = open(self._raw, "r")
        try:
            return json.load(handle)
        finally:
            handle.close()

    def __enter__(self):
        # Support `with requests.get(...) as response:` usage.
        return self

    def __exit__(self, *args):
        pass
class MockedResponsePartialStream(MockedResponse):
    """Response whose *first* ``raw`` access yields only the first 1024 bytes.

    The truncation is controlled by the class-level flag
    ``return_partial_stream``: it starts out True and the first access flips
    it to False, so every later access returns the complete file. This
    simulates a download that is interrupted once and then retried
    successfully. Note that the flag is shared across all instances.
    """

    return_partial_stream = True

    @property
    def raw(self):
        stream = open(self._raw, "rb")
        if not self.return_partial_stream:
            return stream
        # Serve a truncated copy exactly once, then disable truncation for
        # all subsequent accesses.
        data = stream.read()
        MockedResponsePartialStream.return_partial_stream = False
        return BytesIO(data[:1024])
import lightly
@mock.patch("lightly.api.download.requests", MockedRequestsModulePartialResponse())
class TestDownloadPartialRespons(unittest.TestCase):
    """Tests the retry behavior of ``download_image`` on a truncated stream.

    The patched ``requests`` module serves a response whose first read is cut
    off after 1024 bytes (see MockedResponsePartialStream), so decoding the
    image fails once and must be retried.
    """

    def setUp(self):
        # Disable retry backoff and limit retries so the tests run fast;
        # original values are restored in tearDown.
        self._max_retries = lightly.api.utils.RETRY_MAX_RETRIES
        self._max_backoff = lightly.api.utils.RETRY_MAX_BACKOFF
        lightly.api.utils.RETRY_MAX_RETRIES = 1
        lightly.api.utils.RETRY_MAX_BACKOFF = 0
        warnings.filterwarnings("ignore")

    def tearDown(self):
        lightly.api.utils.RETRY_MAX_RETRIES = self._max_retries
        lightly.api.utils.RETRY_MAX_BACKOFF = self._max_backoff
        warnings.filterwarnings("default")

    def test_download_image_half_broken_retry_once(self):
        # With a single retry, a once-truncated stream must still fail.
        lightly.api.utils.RETRY_MAX_RETRIES = 1
        original = _pil_image()
        with tempfile.NamedTemporaryFile(suffix=".png") as file:
            original.save(file.name)
            # assert that the retry fails
            with self.assertRaises(RuntimeError) as error:
                image = lightly.api.download.download_image(file.name)
            self.assertTrue("Maximum retries exceeded" in str(error.exception))
            self.assertTrue("<class 'OSError'>" in str(error.exception))
            self.assertTrue("image file is truncated" in str(error.exception))

    def test_download_image_half_broken_retry_twice(self):
        # With two retries the second attempt sees the full stream and succeeds.
        lightly.api.utils.RETRY_MAX_RETRIES = 2
        # NOTE(review): this sets the flag on the *base* class MockedResponse,
        # but the property reads it via MockedResponsePartialStream, whose own
        # class attribute (flipped to False by the previous test) shadows this
        # assignment — presumably `MockedResponsePartialStream` was intended;
        # verify.
        MockedResponse.return_partial_stream = True
        original = _pil_image()
        with tempfile.NamedTemporaryFile(suffix=".png") as file:
            original.save(file.name)
            image = lightly.api.download.download_image(file.name)
            assert _images_equal(image, original)
@mock.patch("lightly.api.download.requests", MockedRequestsModule())
class TestDownload(unittest.TestCase):
    """Tests for ``lightly.api.download`` with ``requests`` served from disk.

    Images/videos are written to temporary files; the patched ``requests``
    module treats URLs as local paths. Video tests are skipped when pyav is
    not installed.
    """

    def setUp(self):
        # Disable retry backoff and limit retries so failure tests run fast;
        # the original values are restored in tearDown.
        self._max_retries = lightly.api.utils.RETRY_MAX_RETRIES
        self._max_backoff = lightly.api.utils.RETRY_MAX_BACKOFF
        lightly.api.utils.RETRY_MAX_RETRIES = 1
        lightly.api.utils.RETRY_MAX_BACKOFF = 0
        warnings.filterwarnings("ignore")

    def tearDown(self):
        lightly.api.utils.RETRY_MAX_RETRIES = self._max_retries
        lightly.api.utils.RETRY_MAX_BACKOFF = self._max_backoff
        warnings.filterwarnings("default")

    def test_download_image(self):
        original = _pil_image()
        with tempfile.NamedTemporaryFile(suffix=".png") as file:
            original.save(file.name)
            for request_kwargs in [None, {"stream": False}]:
                with self.subTest(request_kwargs=request_kwargs):
                    image = lightly.api.download.download_image(
                        file.name, request_kwargs=request_kwargs
                    )
                    assert _images_equal(image, original)

    def test_download_prediction(self):
        original = _json_prediction()
        with tempfile.NamedTemporaryFile(suffix=".json", mode="w+") as file:
            with open(file.name, "w") as f:
                json.dump(original, f)
            for request_kwargs in [None, {"stream": False}]:
                with self.subTest(request_kwargs=request_kwargs):
                    response = lightly.api.download.download_prediction_file(
                        file.name,
                        request_kwargs=request_kwargs,
                    )
                    self.assertDictEqual(response, original)

    def test_download_image_with_session(self):
        # Same as test_download_image but routed through an explicit session.
        session = MockedRequestsModule.Session()
        original = _pil_image()
        with tempfile.NamedTemporaryFile(suffix=".png") as file:
            original.save(file.name)
            image = lightly.api.download.download_image(file.name, session=session)
            assert _images_equal(image, original)

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_all_video_frames(self):
        with tempfile.NamedTemporaryFile(suffix=".avi") as file:
            original = _generate_video(file.name)
            frames = list(lightly.api.download.download_all_video_frames(file.name))
            for frame, orig in zip(frames, original):
                assert _images_equal(frame, orig)

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_all_video_frames_timeout(self):
        # timeout=0 must abort decoding and surface av's ExitError via retry.
        with tempfile.NamedTemporaryFile(suffix=".avi") as file:
            _generate_video(file.name)
            with self.assertRaisesRegexp(
                RuntimeError,
                "Maximum retries exceeded.*av.error.ExitError.*Immediate exit requested.*",
            ):
                list(
                    lightly.api.download.download_all_video_frames(file.name, timeout=0)
                )

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_last_video_frame(self):
        with tempfile.NamedTemporaryFile(suffix=".avi") as file:
            n_frames = 5
            original = _generate_video(file.name, n_frames=n_frames)
            timestamps = list(range(1, n_frames + 1))
            for timestamp in timestamps:
                with self.subTest(timestamp=timestamp):
                    # NOTE(review): `timestamp > n_frames` can never hold for
                    # values drawn from range(1, n_frames + 1); the RuntimeError
                    # branch below is dead code.
                    if timestamp > n_frames:
                        with self.assertRaises(RuntimeError):
                            frame = lightly.api.download.download_video_frame(
                                file.name, timestamp
                            )
                    else:
                        frame = lightly.api.download.download_video_frame(
                            file.name, timestamp
                        )

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_video_frames_at_timestamps(self):
        with tempfile.NamedTemporaryFile(suffix=".avi") as file:
            n_frames = 5
            original = _generate_video(file.name, n_frames=n_frames)
            original_timestamps = list(range(1, n_frames + 1))
            # Pick every second frame, skipping the first and last.
            frame_indices = list(range(2, len(original) - 1, 2))
            timestamps = [original_timestamps[i] for i in frame_indices]
            frames = list(
                lightly.api.download.download_video_frames_at_timestamps(
                    file.name, timestamps
                )
            )
            self.assertEqual(len(frames), len(timestamps))
            for frame, timestamp in zip(frames, frame_indices):
                orig = original[timestamp]
                assert _images_equal(frame, orig)

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_video_frames_at_timestamps_timeout(self):
        with tempfile.NamedTemporaryFile(suffix=".avi") as file:
            n_frames = 5
            _generate_video(file.name, n_frames)
            with self.assertRaisesRegexp(
                RuntimeError,
                "Maximum retries exceeded.*av.error.ExitError.*Immediate exit requested.*",
            ):
                list(
                    lightly.api.download.download_video_frames_at_timestamps(
                        file.name,
                        timestamps=list(range(1, n_frames + 1)),
                        timeout=0,
                    )
                )

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_video_frames_at_timestamps_wrong_order(self):
        # Timestamps must be monotonically increasing.
        with tempfile.NamedTemporaryFile(suffix=".avi") as file:
            original = _generate_video(file.name)
            timestamps = [2, 1]
            with self.assertRaises(ValueError):
                frames = list(
                    lightly.api.download.download_video_frames_at_timestamps(
                        file.name, timestamps
                    )
                )

    # NOTE(review): "emtpy" is a typo for "empty" in the test name; renaming
    # is safe since unittest discovers tests by the "test" prefix.
    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_video_frames_at_timestamps_emtpy(self):
        with tempfile.NamedTemporaryFile(suffix=".avi") as file:
            frames = list(
                lightly.api.download.download_video_frames_at_timestamps(
                    file.name, timestamps=[]
                )
            )
            self.assertEqual(len(frames), 0)

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_all_video_frames_restart_throws(self):
        with tempfile.NamedTemporaryFile(suffix=".avi") as file:
            original = _generate_video(file.name)
            with self.assertRaises(ValueError):
                # timestamp too small
                # NOTE(review): once this first call raises, the rest of the
                # `with` block (the "too large" case and the assertEqual) is
                # never executed — the two cases should probably be separate
                # assertRaises blocks; verify.
                frames = list(
                    lightly.api.download.download_all_video_frames(
                        file.name, timestamp=-1
                    )
                )
                # timestamp too large
                frames = list(
                    lightly.api.download.download_all_video_frames(
                        file.name, timestamp=len(original) + 1
                    )
                )
                self.assertEqual(len(frames), 0)

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_all_video_frames_restart_at_0(self):
        # relevant for restarting if the frame iterator is empty
        # although it shouldn't be
        with tempfile.NamedTemporaryFile(suffix=".avi") as file:
            original = _generate_video(file.name)
            frames = list(
                lightly.api.download.download_all_video_frames(
                    file.name, timestamp=None
                )
            )
            for frame, orig in zip(frames, original):
                assert _images_equal(frame, orig)

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_all_video_frames_restart(self):
        # relevant if decoding a frame goes wrong for some reason and we
        # want to try again
        restart_timestamp = 3
        with tempfile.NamedTemporaryFile(suffix=".avi") as file:
            original = _generate_video(file.name)
            frames = list(
                lightly.api.download.download_all_video_frames(
                    file.name, restart_timestamp
                )
            )
            # Restarting at timestamp 3 should yield frames from index 2 on.
            for frame, orig in zip(frames, original[2:]):
                assert _images_equal(frame, orig)

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_video_frame_fps(self):
        # The pts reported while iterating must be usable to re-download the
        # exact same frame, regardless of the video's frame rate.
        for fps in [24, 30, 60]:
            with self.subTest(msg=f"fps={fps}"), tempfile.NamedTemporaryFile(
                suffix=".avi"
            ) as file:
                original = _generate_video(file.name, fps=fps)
                all_frames = lightly.api.download.download_all_video_frames(
                    file.name,
                    as_pil_image=False,
                )
                for true_frame in all_frames:
                    frame = lightly.api.download.download_video_frame(
                        file.name,
                        timestamp=true_frame.pts,
                        as_pil_image=False,
                    )
                    assert frame.pts == true_frame.pts

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_video_frame_timestamp_exception(self):
        for fps in [24, 30, 60]:
            with self.subTest(msg=f"fps={fps}"), tempfile.NamedTemporaryFile(
                suffix=".avi"
            ) as file:
                original = _generate_video(file.name, fps=fps)
                # this should be the last frame and exist
                frame = lightly.api.download.download_video_frame(
                    file.name, len(original)
                )
                assert _images_equal(frame, original[-1])
                # timestamp after last frame
                with self.assertRaises(RuntimeError):
                    lightly.api.download.download_video_frame(
                        file.name, len(original) + 1
                    )

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_video_frame_negative_timestamp_exception(self):
        for fps in [24, 30, 60]:
            with self.subTest(msg=f"fps={fps}"), tempfile.NamedTemporaryFile(
                suffix=".avi"
            ) as file:
                _generate_video(file.name, fps=fps)
                with self.assertRaises(ValueError):
                    lightly.api.download.download_video_frame(file.name, -1)

    def test_download_and_write_file(self):
        original = _pil_image()
        with tempfile.NamedTemporaryFile(
            suffix=".png"
        ) as file1, tempfile.NamedTemporaryFile(suffix=".png") as file2:
            original.save(file1.name)
            lightly.api.download.download_and_write_file(file1.name, file2.name)
            image = Image.open(file2.name)
            assert _images_equal(original, image)

    def test_download_and_write_file_with_session(self):
        session = MockedRequestsModule.Session()
        original = _pil_image()
        with tempfile.NamedTemporaryFile(
            suffix=".png"
        ) as file1, tempfile.NamedTemporaryFile(suffix=".png") as file2:
            original.save(file1.name)
            lightly.api.download.download_and_write_file(
                file1.name, file2.name, session=session
            )
            image = Image.open(file2.name)
            assert _images_equal(original, image)

    def test_download_and_write_all_files(self):
        n_files = 3
        max_workers = 2
        originals = [_pil_image(seed=i) for i in range(n_files)]
        filenames = [f"filename_{i}.png" for i in range(n_files)]
        with tempfile.TemporaryDirectory() as tempdir1, tempfile.TemporaryDirectory() as tempdir2:
            for request_kwargs in [None, {"stream": False}]:
                with self.subTest(request_kwargs=request_kwargs):
                    # save images at "remote" location
                    urls = [
                        os.path.join(tempdir1, f"url_{i}.png") for i in range(n_files)
                    ]
                    for image, url in zip(originals, urls):
                        image.save(url)
                    # download images from remote to local
                    file_infos = list(zip(filenames, urls))
                    lightly.api.download.download_and_write_all_files(
                        file_infos,
                        output_dir=tempdir2,
                        max_workers=max_workers,
                        request_kwargs=request_kwargs,
                    )
                    for orig, filename in zip(originals, filenames):
                        image = Image.open(os.path.join(tempdir2, filename))
                        assert _images_equal(orig, image)

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_video_frame_count(self):
        # .mpeg files report no frame count in metadata (see _generate_video),
        # forcing the fallback counting path.
        fps = 24
        for true_n_frames in [24, 30, 60]:
            for suffix in [".avi", ".mpeg"]:
                with tempfile.NamedTemporaryFile(suffix=suffix) as file, self.subTest(
                    msg=f"n_frames={true_n_frames}, extension={suffix}"
                ):
                    _generate_video(file.name, n_frames=true_n_frames, fps=fps)
                    n_frames = lightly.api.download.video_frame_count(file.name)
                    assert n_frames == true_n_frames

    # NOTE(review): capital "F" in "Frame" is a typo in the test name; the
    # test is still discovered because of the "test" prefix.
    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_video_Frame_count_timeout(self):
        with tempfile.NamedTemporaryFile(suffix=".avi") as file:
            _generate_video(file.name)
            with self.assertRaisesRegexp(
                RuntimeError,
                "Maximum retries exceeded.*av.error.ExitError.*Immediate exit requested.*",
            ):
                lightly.api.download.video_frame_count(file.name, timeout=0)

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_video_frame_count_no_metadata(self):
        fps = 24
        for true_n_frames in [24, 30, 60]:
            for suffix in [".avi", ".mpeg"]:
                with tempfile.NamedTemporaryFile(suffix=suffix) as file, self.subTest(
                    msg=f"n_frames={true_n_frames}, extension={suffix}"
                ):
                    _generate_video(file.name, n_frames=true_n_frames, fps=fps)
                    n_frames = lightly.api.download.video_frame_count(
                        file.name, ignore_metadata=True
                    )
                    assert n_frames == true_n_frames

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_all_video_frame_counts(self):
        true_n_frames = [3, 5]
        fps = 24
        for suffix in [".avi", ".mpeg"]:
            with tempfile.NamedTemporaryFile(
                suffix=suffix
            ) as file1, tempfile.NamedTemporaryFile(
                suffix=suffix
            ) as file2, self.subTest(
                msg=f"extension={suffix}"
            ):
                _generate_video(file1.name, n_frames=true_n_frames[0], fps=fps)
                _generate_video(file2.name, n_frames=true_n_frames[1], fps=fps)
                frame_counts = lightly.api.download.all_video_frame_counts(
                    urls=[file1.name, file2.name],
                )
                assert sum(frame_counts) == sum(true_n_frames)
                assert frame_counts == true_n_frames

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_all_video_frame_counts_broken(self):
        # A broken (empty, unflushed) video should yield None, not raise.
        fps = 24
        n_frames = 5
        with tempfile.NamedTemporaryFile(
            suffix=".mpeg"
        ) as file1, tempfile.NamedTemporaryFile(suffix=".mpeg") as file2:
            _generate_video(file1.name, fps=fps, n_frames=n_frames)
            _generate_video(file2.name, fps=fps, broken=True)
            urls = [file1.name, file2.name]
            result = lightly.api.download.all_video_frame_counts(urls)
            assert result == [n_frames, None]

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_all_video_frame_counts_broken_ignore_exceptions(self):
        # With no exceptions whitelisted as "empty video", broken files raise.
        fps = 24
        n_frames = 5
        with tempfile.NamedTemporaryFile(
            suffix=".mpeg"
        ) as file1, tempfile.NamedTemporaryFile(suffix=".mpeg") as file2:
            _generate_video(file1.name, fps=fps, n_frames=n_frames)
            _generate_video(file2.name, fps=fps, broken=True)
            urls = [file1.name, file2.name]
            with self.assertRaises(RuntimeError):
                result = lightly.api.download.all_video_frame_counts(
                    urls,
                    exceptions_indicating_empty_video=tuple(),
                )

    @unittest.skipUnless(AV_AVAILABLE, "Pyav not installed")
    def test_download_all_video_frame_counts_progress_bar(self):
        true_n_frames = [3, 5]
        fps = 24
        # Wrap a real tqdm bar so call counts can be asserted.
        pbar = mock.Mock(wraps=tqdm.tqdm(unit="videos"))
        with tempfile.NamedTemporaryFile(
            suffix=".avi"
        ) as file1, tempfile.NamedTemporaryFile(suffix=".avi") as file2:
            _generate_video(file1.name, n_frames=true_n_frames[0], fps=fps)
            _generate_video(file2.name, n_frames=true_n_frames[1], fps=fps)
            frame_counts = lightly.api.download.all_video_frame_counts(
                urls=[file1.name, file2.name],
                progress_bar=pbar,
            )
            assert sum(frame_counts) == sum(true_n_frames)
            assert frame_counts == true_n_frames
            # One progress update per video.
            assert pbar.update.call_count == len(true_n_frames)
def _images_equal(image1, image2):
# note that images saved and loaded from disk must
# use a lossless format, otherwise this equality will not hold
return np.all(np.array(image1) == np.array(image2))
def _pil_image(width=100, height=50, seed=0):
    """Create a deterministic pseudo-random RGB test image.

    Bug fix: the original allocated the array as ``(width, height, 3)``, but
    NumPy/PIL treat the first axis as rows (height), so the produced image had
    its dimensions transposed (``Image.size`` came out as ``(height, width)``).
    The array is now allocated as ``(height, width, 3)`` so that the returned
    image really is ``width`` x ``height``.

    Args:
        width: Image width in pixels.
        height: Image height in pixels.
        seed: Seed for the global NumPy RNG; identical seeds give identical
            images.
    """
    np.random.seed(seed)
    # randn produces values outside [0, 255]; the uint8 cast wraps them, which
    # is fine here since only reproducible pseudo-random pixel data is needed.
    pixels = (np.random.randn(height, width, 3) * 255).astype(np.uint8)
    return Image.fromarray(pixels, mode="RGB")
def _json_prediction():
return {
"string": "Hello World",
"int": 1,
"float": 0.5,
}
def _generate_video(
    out_file,
    n_frames=5,
    width=100,
    height=50,
    seed=0,
    fps=24,
    broken=False,
):
    """Generate a video.

    Use .avi extension if you want to save a lossless video. Use '.mpeg' for
    videos which should have streams.frames = 0, so that the whole video must
    be loaded to find the total number of frames. Note that mpeg requires
    fps = 24.

    Args:
        out_file: Path the video is written to; the extension selects the
            codec (.mpeg -> mpeg1video/yuv420p, otherwise libx264rgb/rgb24).
        n_frames: Number of frames to encode (forced to 0 when ``broken``).
        width: Frame width in pixels.
        height: Frame height in pixels.
        seed: Seed for the random pixel data of the lossless branch.
        fps: Frame rate passed to the encoder.
        broken: If True, write a video with no frames and skip the stream
            flush, producing a file that cannot be decoded.

    Returns:
        The encoded frames converted to PIL images, for comparison in tests.
    """
    is_mpeg = out_file.endswith(".mpeg")
    video_format = "libx264rgb"
    pixel_format = "rgb24"
    if is_mpeg:
        video_format = "mpeg1video"
        pixel_format = "yuv420p"
    if broken:
        n_frames = 0
    np.random.seed(seed)
    container = av.open(out_file, mode="w")
    stream = container.add_stream(video_format, rate=fps)
    stream.width = width
    stream.height = height
    stream.pix_fmt = pixel_format
    if is_mpeg:
        # Blank frames are enough here; mpeg1 is lossy so exact pixel values
        # could not be asserted anyway.
        frames = [av.VideoFrame(width, height, pixel_format) for i in range(n_frames)]
    else:
        # save lossless video
        stream.options["crf"] = "0"
        images = (np.random.randn(n_frames, height, width, 3) * 255).astype(np.uint8)
        frames = [
            av.VideoFrame.from_ndarray(image, format=pixel_format) for image in images
        ]
    for frame in frames:
        for packet in stream.encode(frame):
            container.mux(packet)
    if not broken:
        # flush the stream
        # video cannot be loaded if this is omitted
        packet = stream.encode(None)
        container.mux(packet)
    container.close()
    pil_images = [frame.to_image() for frame in frames]
    return pil_images
| 23,691 | 37.90312 | 98 | py |
lightly | lightly-master/tests/api/test_patch.py | import logging
import pickle
from lightly.openapi_generated.swagger_client import Configuration
def test_make_swagger_configuration_picklable() -> None:
config = Configuration()
# Fix value to make test reproducible on systems with different number of cpus.
config.connection_pool_maxsize = 4
new_config = pickle.loads(pickle.dumps(config))
expected = {
"_Configuration__debug": False,
"_Configuration__logger_file": None,
"_Configuration__logger_format": "%(asctime)s %(levelname)s %(message)s",
"api_key_prefix": {},
"api_key": {},
"assert_hostname": None,
"cert_file": None,
"client_side_validation": True,
"connection_pool_maxsize": 4,
"_base_path": "https://api.lightly.ai",
"key_file": None,
"logger_file_handler": None,
# "logger_formatter", ignore because a new object is created on unpickle
# "logger_stream_handler", ignore because a new object is created on unpickle
"logger": {
"package_logger": logging.getLogger(
"lightly.openapi_generated.swagger_client"
),
"urllib3_logger": logging.getLogger("urllib3"),
},
"password": None,
"proxy": None,
"refresh_api_key_hook": None,
"safe_chars_for_path_param": "",
"ssl_ca_cert": None,
"temp_folder_path": None,
"username": None,
"verify_ssl": True,
}
# Check that all expected values are set except the ignored ones.
assert all(hasattr(config, key) for key in expected.keys())
# Check that new_config values are equal to expected values.
assert all(new_config.__dict__[key] == value for key, value in expected.items())
# Extra assertions for attributes ignored in the tests above.
assert isinstance(new_config.__dict__["logger_formatter"], logging.Formatter)
assert isinstance(
new_config.__dict__["logger_stream_handler"], logging.StreamHandler
)
| 2,017 | 37.075472 | 85 | py |
lightly | lightly-master/tests/api/test_rest_parser.py | import unittest
import numpy as np
from lightly.openapi_generated.swagger_client import (
ActiveLearningScoreCreateRequest,
ApiClient,
SamplingMethod,
ScoresApi,
)
from lightly.openapi_generated.swagger_client.rest import ApiException
class TestRestParser(unittest.TestCase):
    """Demonstrates how the REST layer (mis)handles numpy scalar types."""

    @unittest.skip("This test only shows the error, it does not ensure it is solved.")
    def test_parse_active_learning_scores(self):
        # Same scores as float32 numpy scalars, float64 numpy scalars, and
        # plain Python floats.
        score_value_tuple = (
            np.random.normal(0, 1, size=(999,)).astype(np.float32),
            np.random.normal(0, 1, size=(999,)).astype(np.float64),
            [12.0] * 999,
        )
        # No token configured, so every request fails — only the *type* of the
        # failure differs depending on the element type of the scores.
        api_client = ApiClient()
        self._scores_api = ScoresApi(api_client)
        for i, score_values in enumerate(score_value_tuple):
            with self.subTest(i=i, msg=str(type(score_values))):
                body = ActiveLearningScoreCreateRequest(
                    score_type=SamplingMethod.CORESET, scores=list(score_values)
                )
                if isinstance(score_values[0], float):
                    # Plain floats (and float64, which subclasses float)
                    # serialize fine and fail server-side with an ApiException.
                    with self.assertRaises(ApiException):
                        self._scores_api.create_or_update_active_learning_score_by_tag_id(
                            body, dataset_id="dataset_id_xyz", tag_id="tag_id_xyz"
                        )
                else:
                    # float32 values are not `float` instances and break
                    # serialization client-side with an AttributeError.
                    with self.assertRaises(AttributeError):
                        self._scores_api.create_or_update_active_learning_score_by_tag_id(
                            body, dataset_id="dataset_id_xyz", tag_id="tag_id_xyz"
                        )
| 1,593 | 39.871795 | 90 | py |
lightly | lightly-master/tests/api/test_swagger_api_client.py | import pickle
from pytest_mock import MockerFixture
from lightly.api.swagger_api_client import LightlySwaggerApiClient
from lightly.api.swagger_rest_client import LightlySwaggerRESTClientObject
from lightly.openapi_generated.swagger_client import Configuration
from lightly.openapi_generated.swagger_client.rest import RESTResponse
def test_pickle(mocker: MockerFixture) -> None:
    """A LightlySwaggerApiClient must survive a pickle round-trip."""
    client = LightlySwaggerApiClient(configuration=Configuration(), timeout=5)
    # Attach an unpicklable mock response to prove it is dropped on pickling.
    client.last_response = mocker.MagicMock(spec_set=RESTResponse).return_value
    restored = pickle.loads(pickle.dumps(client))
    expected = {
        "_pool": None,
        "client_side_validation": True,
        # "configuration", ignore because some parts of configuration are recreated on unpickling
        "cookie": None,
        "default_headers": {"User-Agent": "OpenAPI-Generator/1.0.0/python"},
        # "last_response", ignore because it is not copied during pickling
        # "rest_client", ignore because some parts of rest client are recreated on unpickling
    }
    for key, value in expected.items():
        # The original object exposes every expected attribute ...
        assert hasattr(client, key)
        # ... and the round-tripped copy preserves its value.
        assert restored.__dict__[key] == value
    # Attributes ignored above are re-created on unpickle; check their types.
    assert isinstance(restored.__dict__["configuration"], Configuration)
    assert isinstance(
        restored.__dict__["rest_client"], LightlySwaggerRESTClientObject
    )
    # Last reponse is completely removed from client object and is only dynamically
    # reassigned in the ApiClient.__call_api method.
    assert not hasattr(restored, "last_response")
| 1,769 | 45.578947 | 97 | py |
lightly | lightly-master/tests/api/test_swagger_rest_client.py | import pickle
from pytest_mock import MockerFixture
from urllib3 import PoolManager, Timeout
from lightly.api.swagger_rest_client import LightlySwaggerRESTClientObject
from lightly.openapi_generated.swagger_client.configuration import Configuration
class TestLightlySwaggerRESTClientObject:
    """Tests for pickling and request timeout handling of the REST client."""

    def test__pickle(self) -> None:
        client = LightlySwaggerRESTClientObject(
            configuration=Configuration(), timeout=5
        )
        new_client = pickle.loads(pickle.dumps(client))
        expected = {
            # "configuration", ignore because some parts of configuration are recreated on unpickling
            "maxsize": None,
            # "pool_manager", ignore because pool_manager is recreated on unpickling
            "pools_size": 4,
            "timeout": 5,
        }
        # Check that all expected values are set except the ignored ones.
        assert set(expected.keys()) == set(client.__dict__.keys()) - {
            "configuration",
            "pool_manager",
        }
        # Check that new client values are equal to expected values.
        assert all(new_client.__dict__[key] == value for key, value in expected.items())
        # Extra assertions for attributes ignored in the tests above.
        assert isinstance(new_client.__dict__["configuration"], Configuration)
        assert isinstance(new_client.__dict__["pool_manager"], PoolManager)

    def test_request__timeout(self, mocker: MockerFixture) -> None:
        client = LightlySwaggerRESTClientObject(
            configuration=Configuration(), timeout=5
        )
        response = mocker.MagicMock()
        response.status = 200
        client.pool_manager.request = mocker.MagicMock(return_value=response)

        # use default timeout
        client.request(method="GET", url="some-url")
        calls = client.pool_manager.request.mock_calls
        _, _, kwargs = calls[0]
        assert isinstance(kwargs["timeout"], Timeout)
        assert kwargs["timeout"].total == 5

        # use custom timeout
        # A per-request `_request_timeout` must override the client default.
        client.request(method="GET", url="some-url", _request_timeout=10)
        calls = client.pool_manager.request.mock_calls
        _, _, kwargs = calls[1]
        assert isinstance(kwargs["timeout"], Timeout)
        assert kwargs["timeout"].total == 10

    def test_request__connection_read_timeout(self, mocker: MockerFixture) -> None:
        # A (connect, read) tuple must map to the corresponding urllib3
        # Timeout fields instead of a single total timeout.
        client = LightlySwaggerRESTClientObject(
            configuration=Configuration(), timeout=(1, 2)
        )
        response = mocker.MagicMock()
        response.status = 200
        client.pool_manager.request = mocker.MagicMock(return_value=response)
        client.request(method="GET", url="some-url")
        calls = client.pool_manager.request.mock_calls
        _, _, kwargs = calls[0]
        assert isinstance(kwargs["timeout"], Timeout)
        assert kwargs["timeout"].connect_timeout == 1
        assert kwargs["timeout"].read_timeout == 2
| 2,933 | 39.191781 | 101 | py |
lightly | lightly-master/tests/api/test_utils.py | import os
import unittest
from unittest import mock
import pytest
from PIL import Image
from lightly.api.utils import (
DatasourceType,
PIL_to_bytes,
get_lightly_server_location_from_env,
get_signed_url_destination,
getenv,
paginate_endpoint,
retry,
)
class TestUtils(unittest.TestCase):
def test_retry_success(self):
def my_func(arg, kwarg=5):
return arg + kwarg
self.assertEqual(retry(my_func, 5, kwarg=5), 10)
def test_retry_fail(self):
def my_func():
raise RuntimeError()
with self.assertRaises(RuntimeError), mock.patch("time.sleep"):
retry(my_func)
def test_getenv(self):
os.environ["TEST_ENV_VARIABLE"] = "hello world"
env = getenv("TEST_ENV_VARIABLE", "default")
self.assertEqual(env, "hello world")
def test_getenv_fail(self):
env = getenv("TEST_ENV_VARIABLE_WHICH_DOES_NOT_EXIST", "hello world")
self.assertEqual(env, "hello world")
def test_PIL_to_bytes(self):
image = Image.new("RGB", (128, 128))
# test with quality=None
PIL_to_bytes(image)
# test with quality=90
PIL_to_bytes(image, quality=90)
# test with quality=90 and ext=jpg
PIL_to_bytes(image, ext="JPEG", quality=90)
def test_get_signed_url_destination(self):
# S3
self.assertEqual(
get_signed_url_destination(
"https://lightly.s3.eu-central-1.amazonaws.com/lightly/somewhere/image.jpg?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Content-Sha256=UNSIGNED-PAYLOAD&X-Amz-Credential=0123456789%2F20220811%2Feu-central-1%2Fs3%2Faws4_request&X-Amz-Date=20220811T065010Z&X-Amz-Expires=601200&X-Amz-Signature=0123456789&X-Amz-SignedHeaders=host&x-id=GetObject"
),
DatasourceType.S3,
)
self.assertNotEqual(
get_signed_url_destination("http://someething.with.s3.in.it"),
DatasourceType.S3,
)
# GCS
self.assertEqual(
get_signed_url_destination(
"https://storage.googleapis.com/lightly/somewhere/image.jpg?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=lightly%40appspot.gserviceaccount.com%2F20220811%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20220811T065325Z&X-Goog-Expires=601201&X-Goog-SignedHeaders=host&X-Goog-Signature=01234567890"
),
DatasourceType.GCS,
)
self.assertNotEqual(
get_signed_url_destination("http://someething.with.google.in.it"),
DatasourceType.GCS,
)
# AZURE
self.assertEqual(
get_signed_url_destination(
"https://lightly.blob.core.windows.net/lightly/somewhere/image.jpg?sv=2020-08-04&ss=bfqt&srt=sco&sp=0123456789&se=2022-04-13T20:20:02Z&st=2022-04-13T12:20:02Z&spr=https&sig=0123456789"
),
DatasourceType.AZURE,
)
self.assertNotEqual(
get_signed_url_destination("http://someething.with.windows.in.it"),
DatasourceType.AZURE,
)
def test_get_lightly_server_location_from_env(self):
os.environ["LIGHTLY_SERVER_LOCATION"] = "https://api.dev.lightly.ai/ "
host = get_lightly_server_location_from_env()
self.assertEqual(host, "https://api.dev.lightly.ai")
def test_paginate_endpoint(self):
def some_function(page_size=8, page_offset=0):
if page_offset > 3 * page_size:
assert False # should not happen
elif page_offset > 2 * page_size:
return (page_size - 1) * ["a"]
else:
return page_size * ["a"]
page_size = 8
some_iterator = paginate_endpoint(some_function, page_size=page_size)
some_list = list(some_iterator)
self.assertEqual((4 * page_size - 1) * ["a"], some_list)
self.assertEqual(len(some_list), (4 * page_size - 1))
def test_paginate_endpoint__string(self):
def paginated_function(page_size=8, page_offset=0):
"""Returns one page of size page_size, then one page of size page_size - 1."""
if page_offset > 3 * page_size:
assert False # This should not happen.
elif page_offset > 2 * page_size:
return (page_size - 1) * "a"
else:
return page_size * "a"
page_size = 8
some_iterator = paginate_endpoint(paginated_function, page_size=page_size)
some_list = list(some_iterator)
self.assertEqual((4 * page_size - 1) * "a", "".join(some_list))
self.assertEqual(len(some_list), 4) # Expect four pages of strings.
def test_paginate_endpoint__multiple_of_page_size(self):
def paginated_function(page_size=8, page_offset=0):
"""Returns two pages of size page_size, then an empty page."""
if page_offset > 3 * page_size:
return []
elif page_offset > 2 * page_size:
return page_size * ["a"]
else:
return page_size * ["a"]
page_size = 8
some_iterator = paginate_endpoint(paginated_function, page_size=page_size)
some_list = list(some_iterator)
self.assertEqual((4 * page_size) * ["a"], some_list)
self.assertEqual(len(some_list), (4 * page_size))
def test_paginate_endpoint_empty(self):
def some_function(page_size=8, page_offset=0):
return []
some_iterator = paginate_endpoint(some_function, page_size=8)
some_list = list(some_iterator)
self.assertEqual(some_list, [])
| 5,647 | 36.653333 | 352 | py |
lightly | lightly-master/tests/api/test_version_checking.py | import sys
import time
import unittest
import lightly
from lightly.api.version_checking import (
LightlyAPITimeoutException,
get_latest_version,
get_minimum_compatible_version,
is_compatible_version,
is_latest_version,
pretty_print_latest_version,
)
from tests.api_workflow.mocked_api_workflow_client import MockedVersioningApi
class TestVersionChecking(unittest.TestCase):
def setUp(self) -> None:
lightly.api.version_checking.VersioningApi = MockedVersioningApi
def test_get_latest_version(self):
get_latest_version("1.2.3")
def test_get_minimum_compatible_version(self):
get_minimum_compatible_version()
def test_is_latest_version(self) -> None:
assert is_latest_version("1.2.8")
assert not is_latest_version("1.2.7")
assert not is_latest_version("1.1.8")
assert not is_latest_version("0.2.8")
def test_is_compatible_version(self) -> None:
assert is_compatible_version("1.2.1")
assert not is_compatible_version("1.2.0")
assert not is_compatible_version("1.1.9")
assert not is_compatible_version("0.2.1")
def test_pretty_print(self):
pretty_print_latest_version(current_version="curr", latest_version="1.1.1")
def test_version_check_timout_mocked(self):
"""
We cannot check for other errors as we don't know whether the
current LIGHTLY_SERVER_URL is
- unreachable (error in < 1 second)
- causing a timeout and thus raising a LightlyAPITimeoutException
- reachable (success in < 1 second
Thus this only checks that the actual lightly.do_version_check()
with needing >1s internally causes a LightlyAPITimeoutException
"""
try:
old_get_versioning_api = lightly.api.version_checking.get_versioning_api
def mocked_get_versioning_api_timeout():
time.sleep(10)
print("This line should never be reached, calling sys.exit()")
sys.exit()
lightly.api.version_checking.get_versioning_api = (
mocked_get_versioning_api_timeout
)
start_time = time.time()
with self.assertRaises(LightlyAPITimeoutException):
is_latest_version(lightly.__version__)
duration = time.time() - start_time
self.assertLess(duration, 1.5)
finally:
lightly.api.version_checking.get_versioning_api = old_get_versioning_api
| 2,524 | 32.223684 | 84 | py |
lightly | lightly-master/tests/api_workflow/__init__.py | 0 | 0 | 0 | py | |
lightly | lightly-master/tests/api_workflow/mocked_api_workflow_client.py | import csv
import io
import json
import tempfile
import unittest
from collections import defaultdict
from io import IOBase
from typing import *
import numpy as np
import requests
from requests import Response
import lightly
from lightly.api.api_workflow_client import ApiWorkflowClient
from lightly.openapi_generated.swagger_client.api import (
CollaborationApi,
DatasetsApi,
DatasourcesApi,
DockerApi,
EmbeddingsApi,
JobsApi,
MappingsApi,
QuotaApi,
SamplesApi,
SamplingsApi,
ScoresApi,
TagsApi,
VersioningApi,
)
from lightly.openapi_generated.swagger_client.models import (
AsyncTaskData,
CreateDockerWorkerRegistryEntryRequest,
CreateEntityResponse,
DatasetCreateRequest,
DatasetData,
DatasetEmbeddingData,
DatasourceConfig,
DatasourceConfigBase,
DatasourceProcessedUntilTimestampRequest,
DatasourceProcessedUntilTimestampResponse,
DatasourceRawSamplesData,
DatasourceRawSamplesDataRow,
DatasourceRawSamplesMetadataData,
DatasourceRawSamplesPredictionsData,
DockerRunData,
DockerRunScheduledCreateRequest,
DockerRunScheduledData,
DockerRunScheduledPriority,
DockerRunScheduledState,
DockerRunState,
DockerWorkerConfigCreateRequest,
DockerWorkerConfigV3CreateRequest,
DockerWorkerRegistryEntryData,
DockerWorkerState,
DockerWorkerType,
FilenameAndReadUrl,
InitialTagCreateRequest,
JobResultType,
JobState,
JobStatusData,
JobStatusDataResult,
LabelBoxDataRow,
LabelBoxV4DataRow,
LabelStudioTask,
LabelStudioTaskData,
SampleCreateRequest,
SampleData,
SampleDataModes,
SampleMetaData,
SamplePartialMode,
SampleUpdateRequest,
SampleWriteUrls,
SamplingCreateRequest,
SharedAccessConfigCreateRequest,
SharedAccessConfigData,
SharedAccessType,
TagArithmeticsRequest,
TagBitMaskResponse,
TagCreator,
TagData,
Trigger2dEmbeddingJobRequest,
WriteCSVUrlData,
)
from lightly.openapi_generated.swagger_client.rest import ApiException
from tests.api_workflow import utils
def _check_dataset_id(dataset_id: str):
if not isinstance(dataset_id, str) or len(dataset_id) == 0:
raise ApiException(status=400, reason="Invalid dataset id.")
N_FILES_ON_SERVER = 100
class MockedEmbeddingsApi(EmbeddingsApi):
def __init__(self, api_client):
EmbeddingsApi.__init__(self, api_client=api_client)
self.embeddings = [
DatasetEmbeddingData(
id=utils.generate_id(),
name="embedding_newest",
is_processed=True,
created_at=1111111,
),
DatasetEmbeddingData(
id=utils.generate_id(),
name="default",
is_processed=True,
created_at=0,
),
]
def get_embeddings_csv_write_url_by_id(self, dataset_id: str, **kwargs):
_check_dataset_id(dataset_id)
assert isinstance(dataset_id, str)
response_ = WriteCSVUrlData(
signed_write_url="signed_write_url_valid", embedding_id=utils.generate_id()
)
return response_
def get_embeddings_by_dataset_id(
self, dataset_id, **kwargs
) -> List[DatasetEmbeddingData]:
_check_dataset_id(dataset_id)
assert isinstance(dataset_id, str)
return self.embeddings
def trigger2d_embeddings_job(
self, trigger2d_embedding_job_request, dataset_id, embedding_id, **kwargs
):
_check_dataset_id(dataset_id)
assert isinstance(trigger2d_embedding_job_request, Trigger2dEmbeddingJobRequest)
def get_embeddings_csv_read_url_by_id(self, dataset_id, embedding_id, **kwargs):
_check_dataset_id(dataset_id)
return "https://my-embedding-read-url.com"
class MockedSamplingsApi(SamplingsApi):
def trigger_sampling_by_id(
self, body: SamplingCreateRequest, dataset_id, embedding_id, **kwargs
):
_check_dataset_id(dataset_id)
assert isinstance(body, SamplingCreateRequest)
assert isinstance(dataset_id, str)
assert isinstance(embedding_id, str)
response_ = AsyncTaskData(job_id="155")
return response_
class MockedJobsApi(JobsApi):
def __init__(self, *args, **kwargs):
self.no_calls = 0
JobsApi.__init__(self, *args, **kwargs)
def get_job_status_by_id(self, job_id, **kwargs):
assert isinstance(job_id, str)
self.no_calls += 1
if self.no_calls > 3:
result = JobStatusDataResult(
type=JobResultType.SAMPLING, data="selection_tag_id_xyz"
)
response_ = JobStatusData(
id="id_",
status=JobState.FINISHED,
wait_time_till_next_poll=0,
created_at=1234,
finished_at=1357,
result=result,
)
else:
result = None
response_ = JobStatusData(
id="id_",
status=JobState.RUNNING,
wait_time_till_next_poll=0.001,
created_at=1234,
result=result,
)
return response_
class MockedTagsApi(TagsApi):
def create_initial_tag_by_dataset_id(
self, initial_tag_create_request, dataset_id, **kwargs
):
_check_dataset_id(dataset_id)
assert isinstance(initial_tag_create_request, InitialTagCreateRequest)
assert isinstance(dataset_id, str)
response_ = CreateEntityResponse(id=utils.generate_id())
return response_
def get_tag_by_tag_id(self, dataset_id, tag_id, **kwargs):
_check_dataset_id(dataset_id)
assert isinstance(dataset_id, str)
assert isinstance(tag_id, str)
response_ = TagData(
id=tag_id,
dataset_id=dataset_id,
prev_tag_id=utils.generate_id(),
bit_mask_data="0x80bda23e9",
name="second-tag",
tot_size=15,
created_at=1577836800,
changes=[],
)
return response_
def get_tags_by_dataset_id(self, dataset_id, **kwargs):
_check_dataset_id(dataset_id)
tag_1 = TagData(
id=utils.generate_id(),
dataset_id=dataset_id,
prev_tag_id=None,
bit_mask_data="0xf",
name="initial-tag",
tot_size=4,
created_at=1577836800,
changes=[],
)
tag_2 = TagData(
id=utils.generate_id(),
dataset_id=dataset_id,
prev_tag_id=tag_1.id,
bit_mask_data="0xf",
name="query_tag_name_xyz",
tot_size=4,
created_at=1577836800,
changes=[],
)
tag_3 = TagData(
id=utils.generate_id(),
dataset_id=dataset_id,
prev_tag_id=tag_1.id,
bit_mask_data="0x1",
name="preselected_tag_name_xyz",
tot_size=4,
created_at=1577836800,
changes=[],
)
tag_4 = TagData(
id=utils.generate_id(),
dataset_id=dataset_id,
prev_tag_id=tag_3.id,
bit_mask_data="0x3",
name="selected_tag_xyz",
tot_size=4,
created_at=1577836800,
changes=[],
)
tag_5 = TagData(
id=utils.generate_id(),
dataset_id=dataset_id,
prev_tag_id=None,
bit_mask_data="0x1",
name="1000",
tot_size=4,
created_at=1577836800,
changes=[],
)
tags = [tag_1, tag_2, tag_3, tag_4, tag_5]
no_tags_to_return = getattr(self, "no_tags", 5)
tags = tags[:no_tags_to_return]
return tags
def perform_tag_arithmetics(
self, tag_arithmetics_request: TagArithmeticsRequest, dataset_id, **kwargs
):
_check_dataset_id(dataset_id)
if (tag_arithmetics_request.new_tag_name is None) or (
tag_arithmetics_request.new_tag_name == ""
):
return TagBitMaskResponse(bit_mask_data="0x2")
else:
return CreateEntityResponse(id="tag-arithmetic-created")
def perform_tag_arithmetics_bitmask(
self, tag_arithmetics_request: TagArithmeticsRequest, dataset_id, **kwargs
):
_check_dataset_id(dataset_id)
return TagBitMaskResponse(bit_mask_data="0x2")
def upsize_tags_by_dataset_id(self, tag_upsize_request, dataset_id, **kwargs):
_check_dataset_id(dataset_id)
assert tag_upsize_request.upsize_tag_creator in (
TagCreator.USER_PIP,
TagCreator.USER_PIP_LIGHTLY_MAGIC,
)
def create_tag_by_dataset_id(
self, tag_create_request, dataset_id, **kwargs
) -> TagData:
_check_dataset_id(dataset_id)
tag = TagData(
id=utils.generate_id(),
dataset_id=dataset_id,
prev_tag_id=tag_create_request["prev_tag_id"],
bit_mask_data=tag_create_request["bit_mask_data"],
name=tag_create_request["name"],
tot_size=10,
created_at=1577836800,
changes=[],
)
return tag
def delete_tag_by_tag_id(self, dataset_id, tag_id, **kwargs):
_check_dataset_id(dataset_id)
tags = self.get_tags_by_dataset_id(dataset_id)
# assert that tag exists
assert any([tag.id == tag_id for tag in tags])
# assert that tag is a leaf
assert all([tag.prev_tag_id != tag_id for tag in tags])
def export_tag_to_label_studio_tasks(
self, dataset_id: str, tag_id: str, **kwargs
) -> List[Dict]:
if kwargs["page_offset"] and kwargs["page_offset"] > 0:
return []
return [
LabelStudioTask(
id=0,
data=LabelStudioTaskData(
image="https://api.lightly.ai/v1/datasets/62383ab8f9cb290cd83ab5f9/samples/62383cb7e6a0f29e3f31e213/readurlRedirect?type=full&CENSORED",
lightly_file_name="2008_006249_jpg.rf.fdd64460945ca901aa3c7e48ffceea83.jpg",
lightly_meta_info=SampleData(
id="sample_id_0",
type="IMAGE",
dataset_id=dataset_id,
file_name="2008_006249_jpg.rf.fdd64460945ca901aa3c7e48ffceea83.jpg",
exif={},
index=0,
created_at=1647852727873,
last_modified_at=1647852727873,
meta_data=SampleMetaData(
sharpness=27.31265790443818,
size_in_bytes=48224,
snr=2.1969673926211217,
mean=[
0.24441662557257224,
0.4460417517905863,
0.6960984853824035,
],
shape=[167, 500, 3],
std=[
0.12448681278605961,
0.09509570033043004,
0.0763725998175394,
],
sum_of_squares=[
6282.243860049413,
17367.702452895475,
40947.22059208768,
],
sum_of_values=[
20408.78823530978,
37244.486274513954,
58124.22352943069,
],
),
),
),
).to_dict() # temporary until we have a proper openapi generator
]
def export_tag_to_label_box_data_rows(
self, dataset_id: str, tag_id: str, **kwargs
) -> List[Dict]:
if kwargs["page_offset"] and kwargs["page_offset"] > 0:
return []
return [
LabelBoxDataRow(
external_id="2008_007291_jpg.rf.2fca436925b52ea33cf897125a34a2fb.jpg",
image_url="https://api.lightly.ai/v1/datasets/62383ab8f9cb290cd83ab5f9/samples/62383cb7e6a0f29e3f31e233/readurlRedirect?type=CENSORED",
).to_dict() # temporary until we have a proper openapi generator
]
def export_tag_to_label_box_v4_data_rows(
self, dataset_id: str, tag_id: str, **kwargs
) -> List[Dict]:
if kwargs["page_offset"] and kwargs["page_offset"] > 0:
return []
return [
LabelBoxV4DataRow(
row_data="http://localhost:5000/v1/datasets/6401d4534d2ed9112da782f5/samples/6401e455a6045a7faa79b20a/readurlRedirect?type=full&publicToken=token",
global_key="image.png",
media_type="IMAGE",
).to_dict() # temporary until we have a proper openapi generator
]
def export_tag_to_basic_filenames_and_read_urls(
self, dataset_id: str, tag_id: str, **kwargs
) -> List[Dict]:
if kwargs["page_offset"] and kwargs["page_offset"] > 0:
return []
return [
FilenameAndReadUrl(
file_name="export-basic-test-sample-0.png",
read_url="https://storage.googleapis.com/somwhere/export-basic-test-sample-0.png?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=CENSORED",
).to_dict() # temporary until we have a proper openapi generator
]
def export_tag_to_basic_filenames(
self, dataset_id: str, tag_id: str, **kwargs
) -> str:
if kwargs["page_offset"] and kwargs["page_offset"] > 0:
return ""
return """
IMG_2276_jpeg_jpg.rf.7411b1902c81bad8cdefd2cc4eb3a97b.jpg
IMG_2285_jpeg_jpg.rf.4a93d99b9f0b6cccfb27bf2f4a13b99e.jpg
IMG_2274_jpeg_jpg.rf.2f319e949748145fb22dcb52bb325a0c.jpg
"""
class MockedScoresApi(ScoresApi):
def create_or_update_active_learning_score_by_tag_id(
self, body, dataset_id, tag_id, **kwargs
) -> CreateEntityResponse:
_check_dataset_id(dataset_id)
if len(body.scores) > 0 and not isinstance(body.scores[0], float):
raise AttributeError
response_ = CreateEntityResponse(id="selected_tag_id_xyz")
return response_
class MockedMappingsApi(MappingsApi):
def __init__(self, samples_api, *args, **kwargs):
self._samples_api = samples_api
MappingsApi.__init__(self, *args, **kwargs)
self.n_samples = N_FILES_ON_SERVER
sample_names = [f"img_{i}.jpg" for i in range(self.n_samples)]
sample_names.reverse()
self.sample_names = sample_names
def get_sample_mappings_by_dataset_id(self, dataset_id, field, **kwargs):
if dataset_id == "xyz-no-tags":
return []
return self.sample_names[: self.n_samples]
class MockedSamplesApi(SamplesApi):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sample_create_requests: List[SampleCreateRequest] = []
def get_samples_by_dataset_id(self, dataset_id, **kwargs) -> List[SampleData]:
samples = []
for i, body in enumerate(self.sample_create_requests):
sample = SampleData(
id=f"{i}_xyz",
dataset_id="dataset_id_xyz",
file_name=body.file_name,
type="Images",
)
samples.append(sample)
return samples
def get_samples_partial_by_dataset_id(
self,
dataset_id="dataset_id_xyz",
mode: SamplePartialMode = SamplePartialMode.FULL,
**kwargs,
) -> List[SampleData]:
samples = []
for i, body in enumerate(self.sample_create_requests):
if mode == SamplePartialMode.IDS:
sample = SampleDataModes(id=f"{i}_xyz")
elif mode == SamplePartialMode.FILENAMES:
sample = SampleDataModes(
id=f"{i}_xyz",
file_name=body.file_name,
)
else:
sample = SampleDataModes(
id=f"{i}_xyz",
dataset_id=dataset_id,
file_name=body.file_name,
type="Images",
)
samples.append(sample)
return samples
def create_sample_by_dataset_id(self, body, dataset_id, **kwargs):
_check_dataset_id(dataset_id)
assert isinstance(body, SampleCreateRequest)
response_ = CreateEntityResponse(id="xyz")
self.sample_create_requests.append(body)
return response_
def get_sample_image_write_url_by_id(
self, dataset_id, sample_id, is_thumbnail, **kwargs
):
_check_dataset_id(dataset_id)
url = f"{sample_id}_write_url"
return url
def get_sample_image_read_url_by_id(self, dataset_id, sample_id, type, **kwargs):
_check_dataset_id(dataset_id)
url = f"{sample_id}_write_url"
return url
def get_sample_image_write_urls_by_id(
self, dataset_id, sample_id, **kwargs
) -> SampleWriteUrls:
_check_dataset_id(dataset_id)
thumb_url = f"{sample_id}_thumb_write_url"
full_url = f"{sample_id}_full_write_url"
ret = SampleWriteUrls(full=full_url, thumb=thumb_url)
return ret
def update_sample_by_id(self, body, dataset_id, sample_id, **kwargs):
_check_dataset_id(dataset_id)
assert isinstance(body, SampleUpdateRequest)
class MockedDatasetsApi(DatasetsApi):
def __init__(self, api_client):
no_datasets = 3
self._default_datasets = [
DatasetData(
name=f"dataset_{i}",
id=utils.generate_id(),
last_modified_at=i,
type="Images",
img_type="full",
size_in_bytes=-1,
n_samples=-1,
created_at=0,
user_id="user_0",
)
for i in range(no_datasets)
]
self._shared_datasets = [
DatasetData(
name=f"shared_dataset_{i}",
id=utils.generate_id(),
last_modified_at=0,
type="Images",
img_type="full",
size_in_bytes=-1,
n_samples=-1,
created_at=0,
user_id="another_user",
)
for i in range(2)
]
self.reset()
@property
def _all_datasets(self) -> List[DatasetData]:
return [*self.datasets, *self.shared_datasets]
def reset(self):
self.datasets = self._default_datasets
self.shared_datasets = self._shared_datasets
def get_datasets(
self,
shared: bool = False,
get_assets_of_team: bool = False,
page_size: Optional[int] = None,
page_offset: Optional[int] = None,
):
start, end = _start_and_end_offset(page_size=page_size, page_offset=page_offset)
if get_assets_of_team:
return []
if shared:
return self.shared_datasets[start:end]
else:
return self.datasets[start:end]
def create_dataset(self, dataset_create_request: DatasetCreateRequest, **kwargs):
assert isinstance(dataset_create_request, DatasetCreateRequest)
id = utils.generate_id()
if dataset_create_request.name == "xyz-no-tags":
id = "xyz-no-tags"
dataset = DatasetData(
id=id,
name=dataset_create_request.name,
last_modified_at=len(self.datasets) + 1,
type="Images",
size_in_bytes=-1,
n_samples=-1,
created_at=-1,
user_id=utils.generate_id(),
)
self.datasets.append(dataset)
response_ = CreateEntityResponse(id=id)
return response_
def get_dataset_by_id(self, dataset_id):
_check_dataset_id(dataset_id)
dataset = next(
(dataset for dataset in self._all_datasets if dataset_id == dataset.id),
None,
)
if dataset is None:
raise ApiException(status=404, reason="Not found")
return dataset
def register_dataset_upload_by_id(self, body, dataset_id):
_check_dataset_id(dataset_id)
return True
def delete_dataset_by_id(self, dataset_id, **kwargs) -> None:
_check_dataset_id(dataset_id)
datasets_without_that_id = [
dataset for dataset in self.datasets if dataset.id != dataset_id
]
assert len(datasets_without_that_id) == len(self.datasets) - 1
self.datasets = datasets_without_that_id
def get_children_of_dataset_id(self, dataset_id, **kwargs):
raise NotImplementedError()
def get_datasets_enriched(self, **kwargs):
raise NotImplementedError()
def get_datasets_query_by_name(
self,
dataset_name: str,
page_size: Optional[int] = None,
page_offset: Optional[int] = None,
shared: bool = False,
exact: bool = False,
get_assets_of_team: bool = False,
) -> List[DatasetData]:
datasets = self.get_datasets(
shared=shared,
get_assets_of_team=get_assets_of_team,
page_size=page_size,
page_offset=page_offset,
)
if exact:
return [dataset for dataset in datasets if dataset.name == dataset_name]
else:
return [
dataset
for dataset in datasets
if dataset.name is not None and dataset.name.startswith(dataset_name)
]
def update_dataset_by_id(self, body, dataset_id, **kwargs):
raise NotImplementedError()
class MockedDatasourcesApi(DatasourcesApi):
def __init__(self, api_client=None):
super().__init__(api_client=api_client)
# maximum numbers of samples returned by list raw samples request
self._max_return_samples = 2
# default number of samples in every datasource
self._num_samples = 5
self.reset()
def reset(self):
local_datasource = DatasourceConfigBase(
type="LOCAL", full_path="", purpose="INPUT_OUTPUT"
).to_dict()
azure_datasource = DatasourceConfigBase(
type="AZURE", full_path="", purpose="INPUT_OUTPUT"
).to_dict()
self._datasources = {
"dataset_id_xyz": local_datasource,
"dataset_0": azure_datasource,
}
self._processed_until_timestamp = defaultdict(lambda: 0)
self._samples = defaultdict(self._default_samples)
def _default_samples(self):
return [
DatasourceRawSamplesDataRow(file_name=f"file_{i}", read_url=f"url_{i}")
for i in range(self._num_samples)
]
def get_datasource_by_dataset_id(self, dataset_id: str, **kwargs):
try:
datasource = self._datasources[dataset_id]
return datasource
except Exception:
raise ApiException()
def get_datasource_processed_until_timestamp_by_dataset_id(
self, dataset_id: str, **kwargs
) -> DatasourceProcessedUntilTimestampResponse:
timestamp = self._processed_until_timestamp[dataset_id]
return DatasourceProcessedUntilTimestampResponse(timestamp)
def get_list_of_raw_samples_from_datasource_by_dataset_id(
self,
dataset_id,
cursor: str = None,
_from: int = None,
to: int = None,
relevant_filenames_file_name: str = -1,
use_redirected_read_url: bool = False,
**kwargs,
) -> DatasourceRawSamplesData:
if relevant_filenames_file_name == -1:
samples = self._samples[dataset_id]
elif (
isinstance(relevant_filenames_file_name, str)
and len(relevant_filenames_file_name) > 0
):
samples = self._samples[dataset_id][::2]
else:
raise RuntimeError("DATASET_DATASOURCE_RELEVANT_FILENAMES_INVALID")
if cursor is None:
# initial request
assert _from is not None
assert to is not None
cursor_dict = {"from": _from, "to": to}
current = _from
else:
# follow up request
cursor_dict = json.loads(cursor)
current = cursor_dict["current"]
to = cursor_dict["to"]
next_current = min(current + self._max_return_samples, to + 1)
samples = samples[current:next_current]
cursor_dict["current"] = next_current
cursor = json.dumps(cursor_dict)
has_more = len(samples) > 0
return DatasourceRawSamplesData(
has_more=has_more,
cursor=cursor,
data=samples,
)
def get_list_of_raw_samples_predictions_from_datasource_by_dataset_id(
self,
dataset_id: str,
task_name: str,
cursor: str = None,
_from: int = None,
to: int = None,
use_redirected_read_url: bool = False,
**kwargs,
) -> DatasourceRawSamplesPredictionsData:
if cursor is None:
# initial request
assert _from is not None
assert to is not None
cursor_dict = {"from": _from, "to": to}
current = _from
else:
# follow up request
cursor_dict = json.loads(cursor)
current = cursor_dict["current"]
to = cursor_dict["to"]
next_current = min(current + self._max_return_samples, to + 1)
samples = self._samples[dataset_id][current:next_current]
cursor_dict["current"] = next_current
cursor = json.dumps(cursor_dict)
has_more = len(samples) > 0
return DatasourceRawSamplesPredictionsData(
has_more=has_more,
cursor=cursor,
data=samples,
)
def get_list_of_raw_samples_metadata_from_datasource_by_dataset_id(
self,
dataset_id: str,
cursor: str = None,
_from: int = None,
to: int = None,
use_redirected_read_url: bool = False,
**kwargs,
) -> DatasourceRawSamplesMetadataData:
if cursor is None:
# initial request
assert _from is not None
assert to is not None
cursor_dict = {"from": _from, "to": to}
current = _from
else:
# follow up request
cursor_dict = json.loads(cursor)
current = cursor_dict["current"]
to = cursor_dict["to"]
next_current = min(current + self._max_return_samples, to + 1)
samples = self._samples[dataset_id][current:next_current]
cursor_dict["current"] = next_current
cursor = json.dumps(cursor_dict)
has_more = len(samples) > 0
return DatasourceRawSamplesMetadataData(
has_more=has_more,
cursor=cursor,
data=samples,
)
def get_prediction_file_read_url_from_datasource_by_dataset_id(
self, *args, **kwargs
):
return "https://my-read-url.com"
def update_datasource_by_dataset_id(
self, body: DatasourceConfig, dataset_id: str, **kwargs
) -> None:
# TODO: Enable assert once we switch/update to new api code generator.
# assert isinstance(body, DatasourceConfig)
self._datasources[dataset_id] = body # type: ignore
def update_datasource_processed_until_timestamp_by_dataset_id(
self, body, dataset_id, **kwargs
) -> None:
assert isinstance(body, DatasourceProcessedUntilTimestampRequest)
to = body.processed_until_timestamp
self._processed_until_timestamp[dataset_id] = to # type: ignore
class MockedComputeWorkerApi(DockerApi):
def __init__(self, api_client=None):
super().__init__(api_client=api_client)
self._compute_worker_runs = [
DockerRunData(
id=utils.generate_id(),
user_id="user-id",
docker_version="v1",
dataset_id=utils.generate_id(),
state=DockerRunState.TRAINING,
created_at=0,
last_modified_at=100,
message=None,
artifacts=[],
)
]
self._scheduled_compute_worker_runs = [
DockerRunScheduledData(
id=utils.generate_id(),
dataset_id=utils.generate_id(),
config_id=utils.generate_id(),
priority=DockerRunScheduledPriority.MID,
state=DockerRunScheduledState.OPEN,
created_at=0,
last_modified_at=100,
owner=utils.generate_id(),
runs_on=[],
)
]
self._registered_workers = [
DockerWorkerRegistryEntryData(
id=utils.generate_id(),
user_id="user-id",
name="worker-name-1",
worker_type=DockerWorkerType.FULL,
state=DockerWorkerState.OFFLINE,
created_at=0,
last_modified_at=0,
labels=["label-1"],
)
]
def register_docker_worker(self, body, **kwargs):
assert isinstance(body, CreateDockerWorkerRegistryEntryRequest)
return CreateEntityResponse(id=utils.generate_id())
def get_docker_worker_registry_entries(self, **kwargs):
return self._registered_workers
def create_docker_worker_config(self, body, **kwargs):
assert isinstance(body, DockerWorkerConfigCreateRequest)
return CreateEntityResponse(id=utils.generate_id())
def create_docker_worker_config_v3(self, body, **kwargs):
assert isinstance(body, DockerWorkerConfigV3CreateRequest)
return CreateEntityResponse(id=utils.generate_id())
def create_docker_run_scheduled_by_dataset_id(
self, docker_run_scheduled_create_request, dataset_id, **kwargs
):
assert isinstance(
docker_run_scheduled_create_request, DockerRunScheduledCreateRequest
)
_check_dataset_id(dataset_id)
return CreateEntityResponse(id=utils.generate_id())
def get_docker_runs(
self,
page_size: Optional[int] = None,
page_offset: Optional[int] = None,
**kwargs,
):
start, end = _start_and_end_offset(page_size=page_size, page_offset=page_offset)
return self._compute_worker_runs[start:end]
def get_docker_runs_count(self, **kwargs):
return len(self._compute_worker_runs)
def get_docker_runs_scheduled_by_dataset_id(
self, dataset_id, state: Optional[str] = None, **kwargs
):
runs = self._scheduled_compute_worker_runs
runs = [run for run in runs if run.dataset_id == dataset_id]
return runs
    # The endpoints below are not exercised by the current tests; they fail
    # fast with NotImplementedError instead of silently returning fake data.
    def cancel_scheduled_docker_run_state_by_id(
        self, dataset_id: str, scheduled_id: str, **kwargs
    ):
        raise NotImplementedError()
    def confirm_docker_run_artifact_creation(
        self, run_id: str, artifact_id: str, **kwargs
    ):
        raise NotImplementedError()
    def create_docker_run(self, body, **kwargs):
        raise NotImplementedError()
    def create_docker_run_artifact(self, body, run_id, **kwargs):
        raise NotImplementedError()
    def get_docker_license_information(self, **kwargs):
        raise NotImplementedError()
    def get_docker_run_artifact_read_url_by_id(self, run_id, artifact_id, **kwargs):
        raise NotImplementedError()
    def get_docker_run_by_id(self, run_id, **kwargs):
        raise NotImplementedError()
    def get_docker_run_by_scheduled_id(self, scheduled_id, **kwargs):
        raise NotImplementedError()
    def get_docker_run_logs_by_id(self, run_id, **kwargs):
        raise NotImplementedError()
    def get_docker_run_report_read_url_by_id(self, run_id, **kwargs):
        raise NotImplementedError()
    def get_docker_run_report_write_url_by_id(self, run_id, **kwargs):
        raise NotImplementedError()
    def get_docker_runs_scheduled_by_state_and_labels(self, **kwargs):
        raise NotImplementedError()
    def get_docker_runs_scheduled_by_worker_id(self, worker_id, **kwargs):
        raise NotImplementedError()
    def get_docker_worker_config_by_id(self, config_id, **kwargs):
        raise NotImplementedError()
    def get_docker_worker_configs(self, **kwargs):
        raise NotImplementedError()
    def get_docker_worker_registry_entry_by_id(self, worker_id, **kwargs):
        raise NotImplementedError()
    def post_docker_authorization_request(self, body, **kwargs):
        raise NotImplementedError()
    def post_docker_usage_stats(self, body, **kwargs):
        raise NotImplementedError()
    def post_docker_worker_authorization_request(self, body, **kwargs):
        raise NotImplementedError()
    def update_docker_run_by_id(self, body, run_id, **kwargs):
        raise NotImplementedError()
    def update_docker_worker_config_by_id(self, body, config_id, **kwargs):
        raise NotImplementedError()
    def update_docker_worker_registry_entry_by_id(self, body, worker_id, **kwargs):
        raise NotImplementedError()
    def update_scheduled_docker_run_state_by_id(
        self, body, dataset_id, worker_id, scheduled_id, **kwargs
    ):
        raise NotImplementedError()
class MockedVersioningApi(VersioningApi):
    """Mock: reports fixed pip versions so version checks are deterministic."""

    def get_latest_pip_version(self, **kwargs):
        return "1.2.8"

    def get_minimum_compatible_pip_version(self, **kwargs):
        return "1.2.1"
class MockedQuotaApi(QuotaApi):
    """Mock: pretends the account allows datasets of up to 60000 samples."""

    def get_quota_maximum_dataset_size(self, **kwargs):
        return "60000"
def mocked_request_put(dst_url: str, data: IOBase) -> Response:
    """Stand-in for ``requests.put`` that checks an embeddings-CSV upload.

    Verifies that the destination URL is a string and that the uploaded
    payload is UTF-8 text starting with the ``filenames`` CSV header, then
    returns a successful (200) response without performing any network I/O.

    Fix: the original signature was ``data=IOBase``, which made the abstract
    class itself the *default value*; a type annotation was clearly intended.

    Args:
        dst_url: Signed write URL the upload would be sent to.
        data: File-like object whose contents are the CSV payload.

    Returns:
        A ``requests.Response`` with ``status_code`` 200.
    """
    assert isinstance(dst_url, str)
    content_bytes: bytes = data.read()
    content_str: str = content_bytes.decode("utf-8")
    assert content_str.startswith("filenames")
    response_ = Response()
    response_.status_code = 200
    return response_
class MockedAPICollaboration(CollaborationApi):
    # Mock of the collaboration (dataset sharing) API with canned responses.
    def create_or_update_shared_access_config_by_dataset_id(
        self, shared_access_config_create_request, dataset_id, **kwargs
    ):
        # Mock: validates the payload type and returns a fresh entity id.
        assert isinstance(
            shared_access_config_create_request, SharedAccessConfigCreateRequest
        )
        return CreateEntityResponse(id=utils.generate_id())
    def get_shared_access_configs_by_dataset_id(self, dataset_id, **kwargs):
        # Mock: always returns one WRITE config with two fixed user e-mails,
        # regardless of the dataset id.
        write_config = SharedAccessConfigData(
            id=utils.generate_id(),
            owner="owner-id",
            users=["user1@gmail.com", "user2@something.com"],
            teams=["some-id"],
            created_at=0,
            last_modified_at=0,
            access_type=SharedAccessType.WRITE,
        )
        return [write_config]
class MockedApiWorkflowClient(ApiWorkflowClient):
    """ApiWorkflowClient with every sub-API replaced by an in-memory mock.

    File uploads are short-circuited and embedding CSVs are generated
    randomly on the fly, so workflows can be exercised without any server.
    """

    # Base name used for the generated fake embedding rows ("img_0.jpg", ...).
    embeddings_filename_base = "img"
    n_embedding_rows_on_server = N_FILES_ON_SERVER

    def __init__(self, *args, **kwargs):
        # Patch the version check *before* the parent constructor runs it.
        lightly.api.version_checking.VersioningApi = MockedVersioningApi
        ApiWorkflowClient.__init__(self, *args, **kwargs)

        self._selection_api = MockedSamplingsApi(api_client=self.api_client)
        self._jobs_api = MockedJobsApi(api_client=self.api_client)
        self._tags_api = MockedTagsApi(api_client=self.api_client)
        self._embeddings_api = MockedEmbeddingsApi(api_client=self.api_client)
        self._samples_api = MockedSamplesApi(api_client=self.api_client)
        self._mappings_api = MockedMappingsApi(
            api_client=self.api_client, samples_api=self._samples_api
        )
        self._scores_api = MockedScoresApi(api_client=self.api_client)
        self._datasets_api = MockedDatasetsApi(api_client=self.api_client)
        self._datasources_api = MockedDatasourcesApi(api_client=self.api_client)
        self._quota_api = MockedQuotaApi(api_client=self.api_client)
        self._compute_worker_api = MockedComputeWorkerApi(api_client=self.api_client)
        self._collaboration_api = MockedAPICollaboration(api_client=self.api_client)

        # Redirect raw uploads so no real HTTP PUT is ever issued.
        lightly.api.api_workflow_client.requests.put = mocked_request_put

        self.wait_time_till_next_poll = 0.001  # for api_workflow_selection

    def upload_file_with_signed_url(
        self,
        file: IOBase,
        signed_write_url: str,
        max_backoff: int = 32,
        max_retries: int = 5,
        headers: Optional[Dict] = None,  # was `Dict = None`: annotation fixed
        session: Optional[requests.Session] = None,
    ) -> Response:
        """Mock upload: ignores all arguments and returns an empty response."""
        res = Response()
        return res

    def _get_csv_reader_from_read_url(self, read_url: str):
        """Returns a ``csv.reader`` over randomly generated embeddings.

        Produces ``n_embedding_rows_on_server`` rows with
        ``n_dims_embeddings_on_server`` random values each, preceded by a
        header row.  NOTE(review): ``n_dims_embeddings_on_server`` is not
        defined on this class — it is assumed to be set by the test using it.
        """
        n_rows: int = self.n_embedding_rows_on_server
        n_dims: int = self.n_dims_embeddings_on_server
        rows_csv = [
            ["filenames"] + [f"embedding_{i}" for i in range(n_dims)] + ["labels"]
        ]
        for i in range(n_rows):
            row = [f"{self.embeddings_filename_base}_{i}.jpg"]
            for _ in range(n_dims):
                row.append(np.random.uniform(0, 1))
            row.append(i)
            rows_csv.append(row)

        # Serialize through a single in-memory text buffer.  The original
        # round-tripped through a SpooledTemporaryFile *and* a StringIO,
        # which was equivalent but needlessly convoluted.
        buffer = io.StringIO()
        csv.writer(buffer).writerows(rows_csv)
        buffer.seek(0)
        return csv.reader(buffer)
class MockedApiWorkflowSetup(unittest.TestCase):
    """Base test case providing a fully mocked ApiWorkflowClient per test."""

    EMBEDDINGS_FILENAME_BASE: str = "sample"

    def setUp(self, token="token_xyz", dataset_id="dataset_id_xyz") -> None:
        client = MockedApiWorkflowClient(token=token, dataset_id=dataset_id)
        self.api_workflow_client = client
def _start_and_end_offset(
page_size: Optional[int],
page_offset: Optional[int],
) -> Union[Tuple[int, int], Tuple[None, None]]:
if page_size is None and page_offset is None:
return None, None
elif page_size is not None and page_offset is not None:
return page_offset, page_offset + page_size
else:
assert False, "page_size and page_offset must either both be None or both set"
| 38,497 | 33.558348 | 163 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow.py | import os
from unittest import mock
import numpy as np
import lightly
from tests.api_workflow import utils
from tests.api_workflow.mocked_api_workflow_client import (
MockedApiWorkflowClient,
MockedApiWorkflowSetup,
)
class TestApiWorkflow(MockedApiWorkflowSetup):
    """Tests client construction, dataset-id resolution and list reordering."""

    def setUp(self) -> None:
        # Make sure the version check sees the real package version.
        lightly.api.api_workflow_client.__version__ = lightly.__version__
        self.api_workflow_client = MockedApiWorkflowClient(token="token_xyz")

    @mock.patch.dict(os.environ, {"LIGHTLY_TOKEN": "token_xyz"})
    def test_init_with_env_token(self):
        # The token may come from the LIGHTLY_TOKEN environment variable.
        MockedApiWorkflowClient()

    def test_error_if_init_without_token(self):
        # copy environment variables but remove LIGHTLY_TOKEN if it exists
        env_without_token = {
            k: v for k, v in os.environ.items() if k != "LIGHTLY_TOKEN"
        }
        with self.assertRaises(ValueError), mock.patch.dict(
            os.environ, env_without_token, clear=True
        ):
            MockedApiWorkflowClient()

    def test_error_if_version_is_incompatible(self):
        lightly.api.api_workflow_client.__version__ = "0.0.0"
        with self.assertWarns(UserWarning):
            MockedApiWorkflowClient(token="token_xyz")
        # Restore the real version so later tests are unaffected.
        lightly.api.api_workflow_client.__version__ = lightly.__version__

    def test_dataset_id_nonexisting(self):
        # Without an explicit id, the client warns and falls back to the
        # newest dataset on the (mocked) server.
        self.api_workflow_client._datasets_api.reset()
        assert not hasattr(self.api_workflow_client, "_dataset_id")
        with self.assertWarns(UserWarning):
            dataset_id = self.api_workflow_client.dataset_id
        assert dataset_id == self.api_workflow_client._datasets_api.datasets[-1].id

    def test_dataset_id_existing(self):
        # Fix: renamed the local from `id` (shadowed the builtin).
        dataset_id = utils.generate_id()
        self.api_workflow_client._dataset_id = dataset_id
        assert self.api_workflow_client.dataset_id == dataset_id

    def test_set_dataset_id_existing(self):
        datasets = self.api_workflow_client.get_all_datasets()
        self.api_workflow_client.dataset_id = datasets[1].id

    def test_set_dataset_id_missing(self):
        with self.assertRaises(ValueError):
            self.api_workflow_client.dataset_id = "nonexisting-id"

    def test_reorder_random(self):
        # Fix: the loop variable was `iter`, shadowing the builtin; each
        # iteration checks a fresh random permutation round-trip.
        no_random_tries = 100
        for _ in range(no_random_tries):
            numbers_to_choose_from = list(range(100))
            numbers_all = list(np.random.choice(numbers_to_choose_from, 100))
            filenames_on_server = [f"img_{i}" for i in numbers_all]
            api_workflow_client = MockedApiWorkflowClient(
                token="token_xyz", dataset_id="dataset_id_xyz"
            )
            api_workflow_client._mappings_api.sample_names = filenames_on_server
            numbers_in_tag = np.copy(numbers_all)
            np.random.shuffle(numbers_in_tag)
            filenames_for_list = [f"img_{i}" for i in numbers_in_tag]
            list_ordered = api_workflow_client._order_list_by_filenames(
                filenames_for_list, list_to_order=numbers_in_tag
            )
            list_desired_order = [i for i in numbers_all if i in numbers_in_tag]
            assert list_ordered == list_desired_order

    def test_reorder_manual(self):
        filenames_on_server = ["a", "b", "c"]
        api_workflow_client = MockedApiWorkflowClient(
            token="token_xyz", dataset_id="dataset_id_xyz"
        )
        api_workflow_client._mappings_api.sample_names = filenames_on_server
        filenames_for_list = ["c", "a", "b"]
        list_to_order = ["cccc", "aaaa", "bbbb"]
        list_ordered = api_workflow_client._order_list_by_filenames(
            filenames_for_list, list_to_order=list_to_order
        )
        list_desired_order = ["aaaa", "bbbb", "cccc"]
        assert list_ordered == list_desired_order

    def test_reorder_wrong_lengths(self):
        # Mismatched list lengths must raise, whichever side is short.
        filenames_on_server = ["a", "b", "c"]
        api_workflow_client = MockedApiWorkflowClient(
            token="token_xyz", dataset_id="dataset_id_xyz"
        )
        api_workflow_client._mappings_api.sample_names = filenames_on_server
        filenames_for_list = ["c", "a", "b"]
        list_to_order = ["cccc", "aaaa", "bbbb"]
        with self.subTest("filenames_for_list wrong length"):
            with self.assertRaises(ValueError):
                api_workflow_client._order_list_by_filenames(
                    filenames_for_list[:-1], list_to_order
                )
        with self.subTest("list_to_order wrong length"):
            with self.assertRaises(ValueError):
                api_workflow_client._order_list_by_filenames(
                    filenames_for_list, list_to_order[:-1]
                )
        with self.subTest("filenames_for_list and list_to_order wrong length"):
            with self.assertRaises(ValueError):
                api_workflow_client._order_list_by_filenames(
                    filenames_for_list[:-1], list_to_order[:-1]
                )
| 4,903 | 39.528926 | 83 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_artifacts.py | import pytest
from pytest_mock import MockerFixture
from lightly.api import ApiWorkflowClient, ArtifactNotExist
from lightly.openapi_generated.swagger_client.api import DockerApi
from lightly.openapi_generated.swagger_client.models import (
DockerRunArtifactData,
DockerRunArtifactType,
DockerRunData,
DockerRunState,
)
from tests.api_workflow import utils
def test_download_compute_worker_run_artifacts(mocker: MockerFixture) -> None:
    """Each artifact of a run is downloaded into <output_dir>/<file_name>."""
    client = ApiWorkflowClient(token="123")
    mock_download_compute_worker_run_artifact = mocker.MagicMock(
        spec_set=client._download_compute_worker_run_artifact
    )
    client._download_compute_worker_run_artifact = (
        mock_download_compute_worker_run_artifact
    )
    run_id = utils.generate_id()
    artifact_ids = [utils.generate_id(), utils.generate_id()]
    run = DockerRunData(
        id=run_id,
        user_id="user-id",
        dataset_id=utils.generate_id(),
        docker_version="",
        state=DockerRunState.COMPUTING_METADATA,
        created_at=0,
        last_modified_at=0,
        artifacts=[
            DockerRunArtifactData(
                id=artifact_ids[0],
                file_name="report.pdf",
                type=DockerRunArtifactType.REPORT_PDF,
            ),
            DockerRunArtifactData(
                id=artifact_ids[1],
                file_name="checkpoint.ckpt",
                type=DockerRunArtifactType.CHECKPOINT,
            ),
        ],
    )
    client.download_compute_worker_run_artifacts(run=run, output_dir="output_dir")
    # Expect exactly one download call per artifact, no more.
    calls = [
        mocker.call(
            run_id=run_id,
            artifact_id=artifact_ids[0],
            output_path="output_dir/report.pdf",
            timeout=60,
        ),
        mocker.call(
            run_id=run_id,
            artifact_id=artifact_ids[1],
            output_path="output_dir/checkpoint.ckpt",
            timeout=60,
        ),
    ]
    mock_download_compute_worker_run_artifact.assert_has_calls(calls=calls)
    assert mock_download_compute_worker_run_artifact.call_count == len(calls)
def test__download_compute_worker_run_artifact_by_type(
    mocker: MockerFixture,
) -> None:
    """Only the artifact matching the requested type is downloaded."""
    client = ApiWorkflowClient(token="123")
    mock_download_compute_worker_run_artifact = mocker.MagicMock(
        spec_set=client._download_compute_worker_run_artifact
    )
    client._download_compute_worker_run_artifact = (
        mock_download_compute_worker_run_artifact
    )
    run_id = utils.generate_id()
    artifact_ids = [utils.generate_id(), utils.generate_id()]
    run = DockerRunData(
        id=run_id,
        user_id="user-id",
        dataset_id=utils.generate_id(),
        docker_version="",
        state=DockerRunState.COMPUTING_METADATA,
        created_at=0,
        last_modified_at=0,
        artifacts=[
            DockerRunArtifactData(
                id=artifact_ids[0],
                file_name="report.pdf",
                type=DockerRunArtifactType.REPORT_PDF,
            ),
            DockerRunArtifactData(
                id=artifact_ids[1],
                file_name="checkpoint.ckpt",
                type=DockerRunArtifactType.CHECKPOINT,
            ),
        ],
    )
    client._download_compute_worker_run_artifact_by_type(
        run=run,
        artifact_type=DockerRunArtifactType.CHECKPOINT,
        output_path="output_dir/checkpoint.ckpt",
        timeout=0,
    )
    # The CHECKPOINT artifact (second entry) is the one fetched.
    mock_download_compute_worker_run_artifact.assert_called_once_with(
        run_id=run_id,
        artifact_id=artifact_ids[1],
        output_path="output_dir/checkpoint.ckpt",
        timeout=0,
    )
def test__download_compute_worker_run_artifact_by_type__no_artifacts(
    mocker: MockerFixture,
) -> None:
    """A run without any artifacts raises ArtifactNotExist."""
    client = ApiWorkflowClient(token="123")
    download_mock = mocker.MagicMock(
        spec_set=client._download_compute_worker_run_artifact
    )
    client._download_compute_worker_run_artifact = download_mock
    run_without_artifacts = DockerRunData(
        id=utils.generate_id(),
        user_id="user-id",
        dataset_id=utils.generate_id(),
        docker_version="",
        state=DockerRunState.COMPUTING_METADATA,
        created_at=0,
        last_modified_at=0,
        artifacts=None,
    )
    with pytest.raises(ArtifactNotExist, match="Run has no artifacts."):
        client._download_compute_worker_run_artifact_by_type(
            run=run_without_artifacts,
            artifact_type=DockerRunArtifactType.CHECKPOINT,
            output_path="output_dir/checkpoint.ckpt",
            timeout=0,
        )
def test__download_compute_worker_run_artifact_by_type__no_artifact_with_type(
    mocker: MockerFixture,
) -> None:
    """Requesting a type the run does not have raises ArtifactNotExist."""
    client = ApiWorkflowClient(token="123")
    download_mock = mocker.MagicMock(
        spec_set=client._download_compute_worker_run_artifact
    )
    client._download_compute_worker_run_artifact = download_mock
    report_only_run = DockerRunData(
        id=utils.generate_id(),
        user_id="user-id",
        dataset_id=utils.generate_id(),
        docker_version="",
        state=DockerRunState.COMPUTING_METADATA,
        created_at=0,
        last_modified_at=0,
        artifacts=[
            DockerRunArtifactData(
                id=utils.generate_id(),
                file_name="report.pdf",
                type=DockerRunArtifactType.REPORT_PDF,
            ),
        ],
    )
    with pytest.raises(ArtifactNotExist, match="No artifact with type"):
        client._download_compute_worker_run_artifact_by_type(
            run=report_only_run,
            artifact_type=DockerRunArtifactType.CHECKPOINT,
            output_path="output_dir/checkpoint.ckpt",
            timeout=0,
        )
def test__get_compute_worker_run_checkpoint_url(
    mocker: MockerFixture,
) -> None:
    """The read URL of the artifact chosen by _get_artifact_by_type is returned."""
    mocked_client = mocker.MagicMock(spec=ApiWorkflowClient)
    # Fixture fix: this artifact plays the checkpoint role, but the original
    # test declared it with type=REPORT_PDF / "report.pdf".  The assertions
    # are unaffected (the lookup is mocked); the fixture now matches the
    # behavior under test.
    mocked_artifact = DockerRunArtifactData(
        id=utils.generate_id(),
        file_name="checkpoint.ckpt",
        type=DockerRunArtifactType.CHECKPOINT,
    )
    mocked_client._get_artifact_by_type.return_value = mocked_artifact
    mocked_client._compute_worker_api = mocker.MagicMock(spec_set=DockerApi)
    mocked_client._compute_worker_api.get_docker_run_artifact_read_url_by_id.return_value = (
        "some_read_url"
    )
    run = DockerRunData(
        id=utils.generate_id(),
        user_id="user-id",
        dataset_id=utils.generate_id(),
        docker_version="",
        state=DockerRunState.COMPUTING_METADATA,
        created_at=0,
        last_modified_at=0,
        artifacts=[mocked_artifact],
    )
    read_url = ApiWorkflowClient.get_compute_worker_run_checkpoint_url(
        self=mocked_client, run=run
    )
    assert read_url == "some_read_url"
    mocked_client._get_artifact_by_type.assert_called_with(
        artifact_type=DockerRunArtifactType.CHECKPOINT, run=run
    )
    mocked_client._compute_worker_api.get_docker_run_artifact_read_url_by_id.assert_called_with(
        run_id=run.id, artifact_id=mocked_artifact.id
    )
| 7,097 | 32.481132 | 96 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_client.py | import os
import platform
import unittest
from unittest import mock
import requests
from pytest_mock import MockerFixture
import lightly
from lightly.api.api_workflow_client import LIGHTLY_S3_SSE_KMS_KEY, ApiWorkflowClient
class TestApiWorkflowClient(unittest.TestCase):
    """Tests for ApiWorkflowClient.upload_file_with_signed_url."""

    def test_upload_file_with_signed_url(self):
        with mock.patch("lightly.api.api_workflow_client.requests") as requests:
            client = ApiWorkflowClient(token="")
            file = mock.Mock()
            signed_write_url = ""
            client.upload_file_with_signed_url(
                file=file,
                signed_write_url=signed_write_url,
            )
            requests.put.assert_called_with(signed_write_url, data=file)

    def test_upload_file_with_signed_url_session(self):
        session = mock.Mock()
        file = mock.Mock()
        signed_write_url = ""
        client = ApiWorkflowClient(token="")
        client.upload_file_with_signed_url(
            file=file, signed_write_url=signed_write_url, session=session
        )
        session.put.assert_called_with(signed_write_url, data=file)

    def test_upload_file_with_signed_url_session_sse(self):
        session = mock.Mock()
        file = mock.Mock()
        signed_write_url = "http://somwhere.s3.amazonaws.com/someimage.png"
        client = ApiWorkflowClient(token="")
        # Fix: enable SSE via mock.patch.dict so os.environ is restored when
        # the block exits; the original plain assignment leaked the variable
        # into every test that ran afterwards.
        with mock.patch.dict(os.environ, {LIGHTLY_S3_SSE_KMS_KEY: "True"}):
            client.upload_file_with_signed_url(
                file=file, signed_write_url=signed_write_url, session=session
            )
        session.put.assert_called_with(
            signed_write_url,
            data=file,
            headers={"x-amz-server-side-encryption": "AES256"},
        )

    def test_upload_file_with_signed_url_session_sse_kms(self):
        session = mock.Mock()
        file = mock.Mock()
        signed_write_url = "http://somwhere.s3.amazonaws.com/someimage.png"
        client = ApiWorkflowClient(token="")
        # Fix: SSE with a KMS key, again scoped via patch.dict (see above).
        sseKMSKey = "arn:aws:kms:us-west-2:123456789000:key/1234abcd-12ab-34cd-56ef-1234567890ab"
        with mock.patch.dict(os.environ, {LIGHTLY_S3_SSE_KMS_KEY: sseKMSKey}):
            client.upload_file_with_signed_url(
                file=file, signed_write_url=signed_write_url, session=session
            )
        session.put.assert_called_with(
            signed_write_url,
            data=file,
            headers={
                "x-amz-server-side-encryption": "aws:kms",
                "x-amz-server-side-encryption-aws-kms-key-id": sseKMSKey,
            },
        )

    def test_upload_file_with_signed_url_raise_status(self):
        # Connection errors from the transport must propagate to the caller.
        def raise_connection_error(*args, **kwargs):
            raise requests.exceptions.ConnectionError()

        with mock.patch(
            "lightly.api.api_workflow_client.requests.put", raise_connection_error
        ):
            client = ApiWorkflowClient(token="")
            with self.assertRaises(requests.exceptions.ConnectionError):
                client.upload_file_with_signed_url(
                    file=mock.Mock(),
                    signed_write_url="",
                )
def test_user_agent_header(mocker: MockerFixture) -> None:
    """The client advertises version and platform info in its User-Agent."""
    mocker.patch.object(lightly.api.api_workflow_client, "__version__", new="VERSION")
    mocker.patch.object(
        lightly.api.api_workflow_client.version_checking,
        "is_compatible_version",
        new=lambda _: True,
    )
    platform_mock = mocker.patch.object(
        lightly.api.api_workflow_client, "platform", spec_set=platform
    )
    platform_mock.system.return_value = "SYSTEM"
    platform_mock.release.return_value = "RELEASE"
    platform_mock.platform.return_value = "PLATFORM"
    platform_mock.processor.return_value = "PROCESSOR"
    platform_mock.python_version.return_value = "PYTHON_VERSION"

    client = ApiWorkflowClient(token="")

    expected = (
        "Lightly/VERSION (SYSTEM/RELEASE; PLATFORM; PROCESSOR;) "
        "python/PYTHON_VERSION"
    )
    assert client.api_client.user_agent == expected
| 4,057 | 36.574074 | 97 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_collaboration.py | from tests.api_workflow import utils
from tests.api_workflow.mocked_api_workflow_client import (
MockedApiWorkflowClient,
MockedApiWorkflowSetup,
)
class TestApiWorkflowDatasets(MockedApiWorkflowSetup):
    # NOTE(review): despite the name, this class tests the *collaboration*
    # (dataset sharing) workflow; consider renaming it.
    def setUp(self) -> None:
        self.api_workflow_client = MockedApiWorkflowClient(token="token_xyz")
    def test_share_empty_dataset(self):
        # Sharing with an empty e-mail list must not raise.
        self.api_workflow_client.share_dataset_only_with(
            dataset_id=utils.generate_id(), user_emails=[]
        )
    def test_share_dataset(self):
        self.api_workflow_client.share_dataset_only_with(
            dataset_id=utils.generate_id(), user_emails=["someone@something.com"]
        )
    def test_get_shared_users(self):
        # The mocked collaboration API always returns these two users.
        user_emails = self.api_workflow_client.get_shared_users(
            dataset_id=utils.generate_id()
        )
        assert user_emails == ["user1@gmail.com", "user2@something.com"]
| 902 | 32.444444 | 81 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_compute_worker.py | import json
import random
from typing import Any, List
from unittest import mock
from unittest.mock import MagicMock
import pytest
from pydantic import ValidationError
from pytest_mock import MockerFixture
from lightly.api import ApiWorkflowClient, api_workflow_compute_worker
from lightly.api.api_workflow_compute_worker import (
STATE_SCHEDULED_ID_NOT_FOUND,
ComputeWorkerRunInfo,
InvalidConfigurationError,
_config_to_camel_case,
_snake_to_camel_case,
_validate_config,
)
from lightly.openapi_generated.swagger_client.api import DockerApi
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.models import (
DockerRunData,
DockerRunScheduledData,
DockerRunScheduledPriority,
DockerRunScheduledState,
DockerRunState,
DockerWorkerConfigV3,
DockerWorkerConfigV3Docker,
DockerWorkerConfigV3DockerCorruptnessCheck,
DockerWorkerConfigV3Lightly,
DockerWorkerConfigV3LightlyCollate,
DockerWorkerConfigV3LightlyLoader,
DockerWorkerState,
DockerWorkerType,
SelectionConfig,
SelectionConfigEntry,
SelectionConfigEntryInput,
SelectionConfigEntryStrategy,
SelectionInputPredictionsName,
SelectionInputType,
SelectionStrategyThresholdOperation,
SelectionStrategyType,
TagData,
)
from lightly.openapi_generated.swagger_client.rest import ApiException
from tests.api_workflow import utils
from tests.api_workflow.mocked_api_workflow_client import MockedApiWorkflowSetup
class TestApiWorkflowComputeWorker(MockedApiWorkflowSetup):
    """Tests compute-worker registration, config creation and run scheduling
    against the mocked API, plus round-trip serialization of config models."""
    def test_register_compute_worker(self):
        # default name
        worker_id = self.api_workflow_client.register_compute_worker()
        assert worker_id
        # custom name
        worker_id = self.api_workflow_client.register_compute_worker(name="my-worker")
        assert worker_id
    def test_delete_compute_worker(self):
        with mock.patch(
            "tests.api_workflow.mocked_api_workflow_client.MockedComputeWorkerApi"
            ".delete_docker_worker_registry_entry_by_id",
        ) as mock_delete_worker:
            self.api_workflow_client.delete_compute_worker("worker_id")
            mock_delete_worker.assert_called_once_with("worker_id")
    def test_create_compute_worker_config(self):
        # All three config sections given as plain dicts.
        config_id = self.api_workflow_client.create_compute_worker_config(
            worker_config={
                "training": {"task_name": "lightly_pretagging"},
            },
            lightly_config={
                "loader": {
                    "batch_size": 64,
                },
            },
            selection_config={
                "n_samples": 20,
                "strategies": [
                    {
                        "input": {
                            "type": "EMBEDDINGS",
                            "dataset_id": utils.generate_id(),
                            "tag_name": "some-tag-name",
                        },
                        "strategy": {"type": "SIMILARITY"},
                    },
                ],
            },
        )
        assert config_id
    def test_create_compute_worker_config__selection_config_is_class(self) -> None:
        # The selection config may also be passed as a SelectionConfig object.
        config_id = self.api_workflow_client.create_compute_worker_config(
            worker_config={
                "pretagging": True,
            },
            lightly_config={
                "loader": {
                    "batch_size": 64,
                },
            },
            selection_config=SelectionConfig(
                n_samples=20,
                strategies=[
                    SelectionConfigEntry(
                        input=SelectionConfigEntryInput(
                            type=SelectionInputType.EMBEDDINGS,
                            dataset_id=utils.generate_id(),
                            tag_name="some-tag-name",
                        ),
                        strategy=SelectionConfigEntryStrategy(
                            type=SelectionStrategyType.SIMILARITY,
                        ),
                    )
                ],
            ),
        )
        assert config_id
    def test_create_compute_worker_config__all_none(self) -> None:
        # Every section is optional.
        config_id = self.api_workflow_client.create_compute_worker_config(
            worker_config=None,
            lightly_config=None,
            selection_config=None,
        )
        assert config_id
    def test_schedule_compute_worker_run(self):
        scheduled_run_id = self.api_workflow_client.schedule_compute_worker_run(
            worker_config={
                "pretagging": True,
            },
            lightly_config={
                "loader": {
                    "batch_size": 64,
                },
            },
        )
        assert scheduled_run_id
    def test_schedule_compute_worker_run__priority(self):
        scheduled_run_id = self.api_workflow_client.schedule_compute_worker_run(
            worker_config={},
            lightly_config={},
            priority=DockerRunScheduledPriority.HIGH,
        )
        assert scheduled_run_id
    def test_schedule_compute_worker_run__runs_on(self):
        # Worker labels restrict which workers may pick up the run.
        scheduled_run_id = self.api_workflow_client.schedule_compute_worker_run(
            worker_config={}, lightly_config={}, runs_on=["AAA", "BBB"]
        )
        assert scheduled_run_id
    def test_get_compute_worker_ids(self):
        ids = self.api_workflow_client.get_compute_worker_ids()
        assert all(isinstance(id_, str) for id_ in ids)
    def test_get_compute_workers(self):
        # Values match the single worker registered by the mocked API.
        workers = self.api_workflow_client.get_compute_workers()
        assert len(workers) == 1
        assert workers[0].name == "worker-name-1"
        assert workers[0].state == DockerWorkerState.OFFLINE
        assert workers[0].labels == ["label-1"]
    def test_get_compute_worker_runs(self):
        runs = self.api_workflow_client.get_compute_worker_runs()
        assert len(runs) > 0
        assert all(isinstance(run, DockerRunData) for run in runs)
    def test_get_scheduled_compute_worker_runs(self):
        # The client forwards the dataset id, and the state filter if given.
        with mock.patch(
            "tests.api_workflow.mocked_api_workflow_client.MockedComputeWorkerApi"
            ".get_docker_runs_scheduled_by_dataset_id",
        ) as mock_get_runs:
            self.api_workflow_client.get_scheduled_compute_worker_runs()
            mock_get_runs.assert_called_once_with(
                dataset_id=self.api_workflow_client.dataset_id
            )
        with mock.patch(
            "tests.api_workflow.mocked_api_workflow_client.MockedComputeWorkerApi"
            ".get_docker_runs_scheduled_by_dataset_id",
        ) as mock_get_runs:
            self.api_workflow_client.get_scheduled_compute_worker_runs(state="state")
            mock_get_runs.assert_called_once_with(
                dataset_id=self.api_workflow_client.dataset_id, state="state"
            )
    def _check_if_openapi_generated_obj_is_valid(self, obj) -> Any:
        # Round-trips the object through JSON and the generated deserializer
        # and asserts the result is identical to the input.
        api_client = ApiClient()
        obj_as_json = json.dumps(api_client.sanitize_for_serialization(obj))
        mocked_response = mock.MagicMock()
        mocked_response.data = obj_as_json
        obj_api = api_client.deserialize(mocked_response, type(obj).__name__)
        self.assertDictEqual(obj.to_dict(), obj_api.to_dict())
        return obj_api
    def test_selection_config(self):
        # One entry per selection strategy type, serialized and deserialized.
        selection_config = SelectionConfig(
            n_samples=1,
            strategies=[
                SelectionConfigEntry(
                    input=SelectionConfigEntryInput(type=SelectionInputType.EMBEDDINGS),
                    strategy=SelectionConfigEntryStrategy(
                        type=SelectionStrategyType.DIVERSITY,
                        stopping_condition_minimum_distance=-1,
                    ),
                ),
                SelectionConfigEntry(
                    input=SelectionConfigEntryInput(
                        type=SelectionInputType.SCORES,
                        task="my-classification-task",
                        score="uncertainty_margin",
                    ),
                    strategy=SelectionConfigEntryStrategy(
                        type=SelectionStrategyType.WEIGHTS
                    ),
                ),
                SelectionConfigEntry(
                    input=SelectionConfigEntryInput(
                        type=SelectionInputType.METADATA, key="lightly.sharpness"
                    ),
                    strategy=SelectionConfigEntryStrategy(
                        type=SelectionStrategyType.THRESHOLD,
                        threshold=20,
                        operation=SelectionStrategyThresholdOperation.BIGGER_EQUAL,
                    ),
                ),
                SelectionConfigEntry(
                    input=SelectionConfigEntryInput(
                        type=SelectionInputType.PREDICTIONS,
                        task="my_object_detection_task",
                        name=SelectionInputPredictionsName.CLASS_DISTRIBUTION,
                    ),
                    strategy=SelectionConfigEntryStrategy(
                        type=SelectionStrategyType.BALANCE,
                        target={"Ambulance": 0.2, "Bus": 0.4},
                    ),
                ),
            ],
        )
        config = DockerWorkerConfigV3(
            worker_type=DockerWorkerType.FULL, selection=selection_config
        )
        self._check_if_openapi_generated_obj_is_valid(config)
def test_selection_config_from_dict() -> None:
    """A nested dict is converted into SelectionConfig without mutating it."""
    dataset_id = utils.generate_id()
    cfg = {
        "n_samples": 10,
        "proportion_samples": 0.1,
        "strategies": [
            {
                "input": {
                    "type": "EMBEDDINGS",
                    "dataset_id": dataset_id,
                    "tag_name": "some-tag-name",
                },
                "strategy": {"type": "SIMILARITY"},
            },
            {
                "input": {
                    "type": "METADATA",
                    "key": "lightly.sharpness",
                },
                "strategy": {
                    "type": "THRESHOLD",
                    "threshold": 20,
                    "operation": "BIGGER",
                },
            },
        ],
    }
    selection_cfg = api_workflow_compute_worker.selection_config_from_dict(cfg)
    assert selection_cfg.n_samples == 10
    assert selection_cfg.proportion_samples == 0.1
    assert selection_cfg.strategies is not None
    assert len(selection_cfg.strategies) == 2
    assert selection_cfg.strategies[0].input.type == "EMBEDDINGS"
    assert selection_cfg.strategies[0].input.dataset_id == dataset_id
    assert selection_cfg.strategies[0].input.tag_name == "some-tag-name"
    assert selection_cfg.strategies[0].strategy.type == "SIMILARITY"
    assert selection_cfg.strategies[1].input.type == "METADATA"
    assert selection_cfg.strategies[1].input.key == "lightly.sharpness"
    assert selection_cfg.strategies[1].strategy.type == "THRESHOLD"
    assert selection_cfg.strategies[1].strategy.threshold == 20
    assert selection_cfg.strategies[1].strategy.operation == "BIGGER"
    # verify that original dict was not mutated
    assert isinstance(cfg["strategies"][0]["input"], dict)
def test_selection_config_from_dict__missing_strategies() -> None:
    """An empty config must fail: at least one strategy is required."""
    cfg = {}
    with pytest.raises(
        ValidationError,
        match=r"strategies\n ensure this value has at least 1 items",
    ):
        api_workflow_compute_worker.selection_config_from_dict(cfg)
def test_selection_config_from_dict__extra_key() -> None:
    """Unknown top-level keys are rejected."""
    cfg = {"strategies": [], "invalid-key": 0}
    with pytest.raises(
        ValidationError,
        match=r"invalid-key\n extra fields not permitted",
    ):
        api_workflow_compute_worker.selection_config_from_dict(cfg)
def test_selection_config_from_dict__extra_stratey_key() -> None:
    # NOTE(review): "stratey" in the function name is a typo for "strategy".
    """Unknown keys inside a strategy entry are rejected."""
    cfg = {
        "strategies": [
            {
                "input": {"type": "EMBEDDINGS"},
                "strategy": {"type": "DIVERSITY"},
                "invalid-key": {"type": ""},
            },
        ],
    }
    with pytest.raises(
        ValidationError,
        match=r"invalid-key\n extra fields not permitted",
    ):
        api_workflow_compute_worker.selection_config_from_dict(cfg)
def test_selection_config_from_dict__extra_strategy_strategy_key() -> None:
    """camelCase keys are not accepted inside the strategy sub-dict."""
    cfg = {
        "strategies": [
            {
                "input": {"type": "EMBEDDINGS"},
                "strategy": {
                    "type": "DIVERSITY",
                    "stoppingConditionMinimumDistance": 0,
                },
            },
        ],
    }
    with pytest.raises(
        ValidationError,
        match=r"stoppingConditionMinimumDistance\n extra fields not permitted",
    ):
        api_workflow_compute_worker.selection_config_from_dict(cfg)
def test_selection_config_from_dict__multiple_references() -> None:
    """Test that conversion is successful if the dictionary contains multiple references
    to the same object.
    """
    strategy = {"input": {"type": "EMBEDDINGS"}, "strategy": {"type": "DIVERSITY"}}
    cfg = {"strategies": [strategy, strategy]}
    selection_cfg = api_workflow_compute_worker.selection_config_from_dict(cfg)
    assert len(selection_cfg.strategies) == 2
    assert selection_cfg.strategies[0] == selection_cfg.strategies[1]
def test_get_scheduled_run_by_id() -> None:
    """The scheduled run with the matching id is found among several."""
    run_ids = [utils.generate_id() for _ in range(3)]
    scheduled_runs = [
        DockerRunScheduledData(
            id=run_id,
            dataset_id=utils.generate_id(),
            config_id=utils.generate_id(),
            priority=DockerRunScheduledPriority.MID,
            state=DockerRunScheduledState.OPEN,
            created_at=0,
            last_modified_at=1,
            runs_on=[],
        )
        for run_id in run_ids
    ]
    mocked_compute_worker_api = MagicMock(
        get_docker_runs_scheduled_by_dataset_id=lambda dataset_id: scheduled_runs
    )
    mocked_api_client = MagicMock(
        dataset_id="asdf", _compute_worker_api=mocked_compute_worker_api
    )
    scheduled_run_id = run_ids[2]
    # Called unbound with a mocked `self` to isolate the lookup logic.
    scheduled_run_data = ApiWorkflowClient._get_scheduled_run_by_id(
        self=mocked_api_client, scheduled_run_id=scheduled_run_id
    )
    assert scheduled_run_data.id == scheduled_run_id
def test_get_scheduled_run_by_id_not_found() -> None:
    """An unknown scheduled run id raises ApiException."""
    scheduled_runs = [
        DockerRunScheduledData(
            id=utils.generate_id(),
            dataset_id=utils.generate_id(),
            config_id=utils.generate_id(),
            priority=DockerRunScheduledPriority.LOW,
            state=DockerRunScheduledState.OPEN,
            created_at=0,
            last_modified_at=1,
            runs_on=[],
        )
        for _ in range(3)
    ]
    mocked_compute_worker_api = MagicMock(
        get_docker_runs_scheduled_by_dataset_id=lambda dataset_id: scheduled_runs
    )
    mocked_api_client = MagicMock(
        dataset_id="asdf", _compute_worker_api=mocked_compute_worker_api
    )
    scheduled_run_id = "id_5"
    with pytest.raises(
        ApiException,
        match=f"No scheduled run found for run with scheduled_run_id='{scheduled_run_id}'.",
    ):
        ApiWorkflowClient._get_scheduled_run_by_id(
            self=mocked_api_client, scheduled_run_id=scheduled_run_id
        )
def test_get_compute_worker_state_and_message_OPEN() -> None:
    """A scheduled run not yet picked up reports the OPEN state."""
    dataset_id = utils.generate_id()
    scheduled_run = DockerRunScheduledData(
        id=utils.generate_id(),
        dataset_id=dataset_id,
        config_id=utils.generate_id(),
        priority=DockerRunScheduledPriority.MID,
        state=DockerRunScheduledState.OPEN,
        created_at=0,
        last_modified_at=1,
        runs_on=["worker-label"],
    )
    def mocked_raise_exception(*args, **kwargs):
        raise ApiException
    # No docker run exists yet for the scheduled id, so the API call raises.
    mocked_api_client = MagicMock(
        dataset_id=dataset_id,
        _compute_worker_api=MagicMock(
            get_docker_run_by_scheduled_id=mocked_raise_exception
        ),
        _get_scheduled_run_by_id=lambda id: scheduled_run,
    )
    run_info = ApiWorkflowClient.get_compute_worker_run_info(
        self=mocked_api_client, scheduled_run_id=""
    )
    assert run_info.state == DockerRunScheduledState.OPEN
    assert run_info.message.startswith("Waiting for pickup by Lightly Worker.")
    assert run_info.in_end_state() == False
def test_create_docker_worker_config_v3_api_error() -> None:
    """A 4xx ApiException with a JSON body is translated into a readable ValueError."""

    # Minimal stand-in for the HTTP response object consumed by ApiException.
    class HttpThing:
        def __init__(self, status, reason, data):
            self.status = status
            self.reason = reason
            self.data = data

        def getheaders(self):
            return []

    def mocked_raise_exception(*args, **kwargs):
        raise ApiException(
            http_resp=HttpThing(
                403,
                "Not everything has a reason",
                '{"code": "ACCOUNT_SUBSCRIPTION_INSUFFICIENT", "error": "Your current plan allows for 1000000 samples but you tried to use 2000000 samples, please contact sales at sales@lightly.ai to upgrade your account."}',
            )
        )

    client = ApiWorkflowClient(token="123")
    client._dataset_id = utils.generate_id()
    client._compute_worker_api.create_docker_worker_config_v3 = mocked_raise_exception
    # The raised ValueError must contain the error code and message from the body.
    with pytest.raises(
        ValueError,
        match=r'Trying to schedule your job resulted in\n>> ACCOUNT_SUBSCRIPTION_INSUFFICIENT\n>> "Your current plan allows for 1000000 samples but you tried to use 2000000 samples, please contact sales at sales@lightly.ai to upgrade your account."\n>> Please fix the issue mentioned above and see our docs https://docs.lightly.ai/docs/all-configuration-options for more help.',
    ):
        r = client.create_compute_worker_config(
            selection_config={
                "n_samples": 2000000,
                "strategies": [
                    {"input": {"type": "EMBEDDINGS"}, "strategy": {"type": "DIVERSITY"}}
                ],
            },
        )
def test_create_docker_worker_config_v3_5xx_api_error() -> None:
    """A 5xx ApiException is re-raised as-is instead of being translated to ValueError."""

    # Minimal stand-in for the HTTP response object consumed by ApiException.
    class HttpThing:
        def __init__(self, status, reason, data):
            self.status = status
            self.reason = reason
            self.data = data

        def getheaders(self):
            return []

    def mocked_raise_exception(*args, **kwargs):
        raise ApiException(
            http_resp=HttpThing(
                502,
                "Not everything has a reason",
                '{"code": "SOMETHING_BAD", "error": "Server pains"}',
            )
        )

    client = ApiWorkflowClient(token="123")
    client._dataset_id = utils.generate_id()
    client._compute_worker_api.create_docker_worker_config_v3 = mocked_raise_exception
    with pytest.raises(
        ApiException,
        match=r"Server pains",
    ):
        r = client.create_compute_worker_config(
            selection_config={
                "n_samples": 20,
                "strategies": [
                    {"input": {"type": "EMBEDDINGS"}, "strategy": {"type": "DIVERSITY"}}
                ],
            },
        )
def test_create_docker_worker_config_v3_no_body_api_error() -> None:
    """An ApiException without an HTTP body is re-raised unchanged."""

    def mocked_raise_exception(*args, **kwargs):
        raise ApiException

    client = ApiWorkflowClient(token="123")
    client._dataset_id = utils.generate_id()
    client._compute_worker_api.create_docker_worker_config_v3 = mocked_raise_exception
    with pytest.raises(
        ApiException,
    ):
        r = client.create_compute_worker_config(
            selection_config={
                "n_samples": 20,
                "strategies": [
                    {"input": {"type": "EMBEDDINGS"}, "strategy": {"type": "DIVERSITY"}}
                ],
            },
        )
def test_get_compute_worker_state_and_message_CANCELED() -> None:
    """When neither a run nor a scheduled run exists, the info reports a terminal
    STATE_SCHEDULED_ID_NOT_FOUND state."""

    def mocked_raise_exception(*args, **kwargs):
        raise ApiException

    # Both lookups fail: no docker run and no scheduled run for the id.
    mocked_api_client = MagicMock(
        dataset_id=utils.generate_id(),
        _compute_worker_api=MagicMock(
            get_docker_run_by_scheduled_id=mocked_raise_exception
        ),
        _get_scheduled_run_by_id=mocked_raise_exception,
    )
    run_info = ApiWorkflowClient.get_compute_worker_run_info(
        self=mocked_api_client, scheduled_run_id=""
    )
    assert run_info.state == STATE_SCHEDULED_ID_NOT_FOUND
    assert run_info.message.startswith("Could not find a job for the given run_id:")
    assert run_info.in_end_state() == True
def test_get_compute_worker_state_and_message_docker_state() -> None:
    """State and message of an existing docker run are forwarded unchanged."""
    message = "SOME_MESSAGE"
    docker_run = DockerRunData(
        id=utils.generate_id(),
        user_id="user-id",
        state=DockerRunState.GENERATING_REPORT,
        docker_version="",
        created_at=0,
        last_modified_at=0,
        message=message,
    )
    mocked_api_client = MagicMock(
        dataset_id=utils.generate_id(),
        _compute_worker_api=MagicMock(
            get_docker_run_by_scheduled_id=lambda scheduled_id: docker_run
        ),
    )

    run_info = ApiWorkflowClient.get_compute_worker_run_info(
        self=mocked_api_client, scheduled_run_id=utils.generate_id()
    )
    assert run_info.state == DockerRunState.GENERATING_REPORT
    assert run_info.message == message
    assert run_info.in_end_state() == False
def test_compute_worker_run_info_generator(mocker) -> None:
    """The generator yields one info per state transition until a terminal state
    (COMPLETED) is reached."""
    states = [f"state_{i}" for i in range(7)]
    states[-1] = DockerRunState.COMPLETED

    # Fake client that advances through `states` pseudo-randomly but
    # deterministically (seeded), so the expected sequence is reproducible.
    class MockedApiWorkflowClient:
        def __init__(self, states: List[str]):
            self.states = states
            self.current_state_index = 0
            random.seed(42)

        def get_compute_worker_run_info(self, scheduled_run_id: str):
            state = self.states[self.current_state_index]
            if random.random() > 0.9:
                self.current_state_index += 1
            return ComputeWorkerRunInfo(state=state, message=state)

    # Avoid real polling delays between generator iterations.
    mocker.patch("time.sleep", lambda _: None)

    mocked_client = MockedApiWorkflowClient(states)
    run_infos = list(
        ApiWorkflowClient.compute_worker_run_info_generator(
            mocked_client, scheduled_run_id=""
        )
    )

    # Duplicate consecutive states are collapsed: exactly one info per state.
    expected_run_infos = [
        ComputeWorkerRunInfo(state=state, message=state) for state in states
    ]

    assert run_infos == expected_run_infos
def test_get_compute_worker_runs(mocker: MockerFixture) -> None:
    """Runs are fetched in one page and returned sorted by ascending created_at."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    dataset_id = utils.generate_id()
    run_ids = [utils.generate_id(), utils.generate_id()]
    client = ApiWorkflowClient(token="123")
    mock_compute_worker_api = mocker.create_autospec(
        DockerApi, spec_set=True
    ).return_value
    # API returns the newer run (created_at=20) before the older one (created_at=10).
    mock_compute_worker_api.get_docker_runs.side_effect = [
        [
            DockerRunData(
                id=run_ids[0],
                user_id="user-id",
                created_at=20,
                dataset_id=dataset_id,
                docker_version="",
                state=DockerRunState.COMPUTING_METADATA,
                last_modified_at=0,
            ),
            DockerRunData(
                id=run_ids[1],
                user_id="user-id",
                created_at=10,
                dataset_id=dataset_id,
                docker_version="",
                state=DockerRunState.COMPUTING_METADATA,
                last_modified_at=0,
            ),
        ],
    ]
    client._compute_worker_api = mock_compute_worker_api
    runs = client.get_compute_worker_runs()
    # The client sorts the runs: oldest first.
    assert runs == [
        DockerRunData(
            id=run_ids[1],
            user_id="user-id",
            created_at=10,
            dataset_id=dataset_id,
            docker_version="",
            state=DockerRunState.COMPUTING_METADATA,
            last_modified_at=0,
        ),
        DockerRunData(
            id=run_ids[0],
            user_id="user-id",
            created_at=20,
            dataset_id=dataset_id,
            docker_version="",
            state=DockerRunState.COMPUTING_METADATA,
            last_modified_at=0,
        ),
    ]
    mock_compute_worker_api.get_docker_runs.assert_called_once_with(
        page_offset=0, page_size=5000
    )
def test_get_compute_worker_runs__dataset(mocker: MockerFixture) -> None:
    """Dataset-scoped run fetching paginates until an empty page is returned."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    dataset_id = utils.generate_id()
    run_id = utils.generate_id()
    client = ApiWorkflowClient(token="123")
    mock_compute_worker_api = mocker.create_autospec(
        DockerApi, spec_set=True
    ).return_value
    # First page has one run, second page is empty and stops pagination.
    mock_compute_worker_api.get_docker_runs_query_by_dataset_id.side_effect = [
        [
            DockerRunData(
                id=run_id,
                user_id="user-id",
                dataset_id=dataset_id,
                docker_version="",
                state=DockerRunState.COMPUTING_METADATA,
                created_at=0,
                last_modified_at=0,
            ),
        ],
        [],
    ]
    client._compute_worker_api = mock_compute_worker_api
    runs = client.get_compute_worker_runs(dataset_id=dataset_id)
    assert runs == [
        DockerRunData(
            id=run_id,
            user_id="user-id",
            dataset_id=dataset_id,
            docker_version="",
            state=DockerRunState.COMPUTING_METADATA,
            created_at=0,
            last_modified_at=0,
        ),
    ]
    mock_compute_worker_api.get_docker_runs_query_by_dataset_id.assert_called_once_with(
        page_offset=0, page_size=5000, dataset_id=dataset_id
    )
def test_get_compute_worker_run_tags__no_tags(mocker: MockerFixture) -> None:
    """An empty tag list from the API yields an empty result."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    run_id = utils.generate_id()
    client = ApiWorkflowClient(token="123", dataset_id=utils.generate_id())
    mock_compute_worker_api = mocker.create_autospec(
        DockerApi, spec_set=True
    ).return_value
    mock_compute_worker_api.get_docker_run_tags.return_value = []
    client._compute_worker_api = mock_compute_worker_api
    tags = client.get_compute_worker_run_tags(run_id=run_id)
    assert len(tags) == 0
    mock_compute_worker_api.get_docker_run_tags.assert_called_once_with(run_id=run_id)
def test_get_compute_worker_run_tags__single_tag(mocker: MockerFixture) -> None:
    """A single tag belonging to the client's dataset is returned."""
    dataset_id = utils.generate_id()
    run_id = utils.generate_id()
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    client = ApiWorkflowClient(token="123", dataset_id=dataset_id)
    # __init__ is mocked away, so the dataset id must be set manually.
    client._dataset_id = dataset_id
    mock_compute_worker_api = mocker.create_autospec(
        DockerApi, spec_set=True
    ).return_value
    mock_compute_worker_api.get_docker_run_tags.return_value = [
        TagData(
            id=utils.generate_id(),
            dataset_id=dataset_id,
            prev_tag_id=None,
            bit_mask_data="0x1",
            name="tag-0",
            tot_size=0,
            created_at=0,
            changes=None,
            run_id=run_id,
        )
    ]
    client._compute_worker_api = mock_compute_worker_api
    tags = client.get_compute_worker_run_tags(run_id=run_id)
    assert len(tags) == 1
    mock_compute_worker_api.get_docker_run_tags.assert_called_once_with(run_id=run_id)
def test_get_compute_worker_run_tags__multiple_tags(mocker: MockerFixture) -> None:
    """Only tags of the client's dataset are returned, preserving the API order.

    Three tags share the same run; one belongs to a different dataset and must
    be filtered out. The remaining tags keep their order (newest first).
    """
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    run_id = utils.generate_id()
    dataset_id = utils.generate_id()
    client = ApiWorkflowClient(token="123", dataset_id=dataset_id)
    # __init__ is mocked away, so the dataset id must be set manually.
    client._dataset_id = dataset_id
    mock_compute_worker_api = mocker.create_autospec(
        DockerApi, spec_set=True
    ).return_value
    tag_ids = [utils.generate_id() for _ in range(3)]
    tag_0 = TagData(
        id=tag_ids[0],
        dataset_id=dataset_id,
        prev_tag_id=None,
        bit_mask_data="0x1",
        name="tag-0",
        tot_size=0,
        created_at=0,
        changes=None,
        run_id=run_id,
    )
    tag_1 = TagData(
        id=tag_ids[1],
        dataset_id=dataset_id,
        prev_tag_id=tag_ids[0],
        bit_mask_data="0x1",
        name="tag-1",
        tot_size=0,
        created_at=1,
        changes=None,
        run_id=run_id,
    )
    # tag from a different dataset
    tag_2 = TagData(
        id=tag_ids[2],
        dataset_id=utils.generate_id(),
        prev_tag_id=None,
        bit_mask_data="0x1",
        name="tag-2",
        tot_size=0,
        created_at=2,
        changes=None,
        run_id=run_id,
    )
    # tags are returned ordered by decreasing creation date
    mock_compute_worker_api.get_docker_run_tags.return_value = [tag_2, tag_1, tag_0]
    client._compute_worker_api = mock_compute_worker_api
    # Fix: query with the same run_id used by the tag fixtures (was the
    # hard-coded string "run-0", inconsistent with the rest of the test).
    tags = client.get_compute_worker_run_tags(run_id=run_id)
    assert len(tags) == 2
    assert tags[0] == tag_1
    assert tags[1] == tag_0
    mock_compute_worker_api.get_docker_run_tags.assert_called_once_with(run_id=run_id)
def test__config_to_camel_case() -> None:
    """Dictionary keys are camelCased recursively; values stay untouched."""
    snake_config = {
        "lorem_ipsum": "dolor",
        "lorem": {
            "ipsum_dolor": "sit_amet",
        },
    }
    expected_camel_config = {
        "loremIpsum": "dolor",
        "lorem": {
            "ipsumDolor": "sit_amet",
        },
    }
    assert _config_to_camel_case(snake_config) == expected_camel_config
def test__snake_to_camel_case() -> None:
    """snake_case identifiers become camelCase; camelCase input stays unchanged."""
    cases = [
        ("lorem", "lorem"),
        ("lorem_ipsum", "loremIpsum"),
        ("lorem_ipsum_dolor", "loremIpsumDolor"),
        ("loremIpsum", "loremIpsum"),  # already camelCase: do nothing
    ]
    for snake, expected_camel in cases:
        assert _snake_to_camel_case(snake) == expected_camel
def test__validate_config__docker() -> None:
    """A config dict matching the docker config model validates without error."""
    obj = DockerWorkerConfigV3Docker(
        enable_training=False,
        corruptness_check=DockerWorkerConfigV3DockerCorruptnessCheck(
            corruption_threshold=0.1,
        ),
    )
    _validate_config(
        cfg={
            "enable_training": False,
            "corruptness_check": {
                "corruption_threshold": 0.1,
            },
        },
        obj=obj,
    )
def test__validate_config__docker_typo() -> None:
    """A misspelled top-level docker option raises with a did-you-mean suggestion."""
    obj = DockerWorkerConfigV3Docker(
        enable_training=False,
        corruptness_check=DockerWorkerConfigV3DockerCorruptnessCheck(
            corruption_threshold=0.1,
        ),
    )

    with pytest.raises(
        InvalidConfigurationError,
        match="Option 'enable_trainingx' does not exist! Did you mean 'enable_training'?",
    ):
        _validate_config(
            cfg={
                "enable_trainingx": False,
                "corruptness_check": {
                    "corruption_threshold": 0.1,
                },
            },
            obj=obj,
        )
def test__validate_config__docker_typo_nested() -> None:
    """A misspelled nested docker option raises with a did-you-mean suggestion."""
    obj = DockerWorkerConfigV3Docker(
        enable_training=False,
        corruptness_check=DockerWorkerConfigV3DockerCorruptnessCheck(
            corruption_threshold=0.1,
        ),
    )

    with pytest.raises(
        InvalidConfigurationError,
        match="Option 'corruption_thresholdx' does not exist! Did you mean 'corruption_threshold'?",
    ):
        _validate_config(
            cfg={
                "enable_training": False,
                "corruptness_check": {
                    "corruption_thresholdx": 0.1,
                },
            },
            obj=obj,
        )
def test__validate_config__lightly() -> None:
    """A config dict matching the lightly config model validates without error."""
    obj = DockerWorkerConfigV3Lightly(
        loader=DockerWorkerConfigV3LightlyLoader(
            num_workers=-1,
            batch_size=16,
            shuffle=True,
        ),
        collate=DockerWorkerConfigV3LightlyCollate(
            rr_degrees=[-90, 90],
        ),
    )
    _validate_config(
        cfg={
            "loader": {
                "num_workers": -1,
                "batch_size": 16,
                "shuffle": True,
            },
            "collate": {
                "rr_degrees": [-90, 90],
            },
        },
        obj=obj,
    )
def test__validate_config__lightly_typo() -> None:
    """A misspelled top-level lightly option raises with a did-you-mean suggestion."""
    obj = DockerWorkerConfigV3Lightly(
        loader=DockerWorkerConfigV3LightlyLoader(
            num_workers=-1,
            batch_size=16,
            shuffle=True,
        )
    )
    with pytest.raises(
        InvalidConfigurationError,
        match="Option 'loaderx' does not exist! Did you mean 'loader'?",
    ):
        _validate_config(
            cfg={
                "loaderx": {
                    "num_workers": -1,
                    "batch_size": 16,
                    "shuffle": True,
                },
            },
            obj=obj,
        )
def test__validate_config__lightly_typo_nested() -> None:
    """A misspelled nested lightly option raises with a did-you-mean suggestion."""
    obj = DockerWorkerConfigV3Lightly(
        loader=DockerWorkerConfigV3LightlyLoader(
            num_workers=-1,
            batch_size=16,
            shuffle=True,
        )
    )
    with pytest.raises(
        InvalidConfigurationError,
        match="Option 'num_workersx' does not exist! Did you mean 'num_workers'?",
    ):
        _validate_config(
            cfg={
                "loader": {
                    "num_workersx": -1,
                    "batch_size": 16,
                    "shuffle": True,
                },
            },
            obj=obj,
        )
| 33,286 | 32.931702 | 378 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_datasets.py | from typing import List
import pytest
from pytest_mock import MockerFixture
from lightly.api import ApiWorkflowClient, api_workflow_datasets
from lightly.openapi_generated.swagger_client.api import DatasetsApi
from lightly.openapi_generated.swagger_client.models import (
Creator,
DatasetCreateRequest,
DatasetData,
DatasetType,
)
from lightly.openapi_generated.swagger_client.rest import ApiException
from tests.api_workflow import utils
from tests.api_workflow.mocked_api_workflow_client import MockedApiWorkflowSetup
def _get_datasets(count: int) -> List[DatasetData]:
    """Build `count` image-type dataset fixtures with fresh ids and indexed names."""
    datasets = []
    for index in range(count):
        datasets.append(
            DatasetData(
                name=f"mock_dataset_{index}",
                id=utils.generate_id(),
                last_modified_at=0,
                type=DatasetType.IMAGES,
                img_type="full",
                size_in_bytes=-1,
                n_samples=-1,
                created_at=0,
                user_id="user_0",
            )
        )
    return datasets
class TestApiWorkflowDatasets(MockedApiWorkflowSetup):
    """Dataset lookup tests against the mocked API, covering own vs. shared datasets."""

    def setUp(self, token="token_xyz", dataset_id="dataset_id_xyz") -> None:
        super().setUp(token, dataset_id)
        # Reset the mocked datasets API so every test starts from a clean state.
        self.api_workflow_client._datasets_api.reset()

    def test_create_dataset_existing(self):
        """Creating a dataset whose name already exists raises ValueError."""
        with self.assertRaises(ValueError):
            self.api_workflow_client.create_dataset(dataset_name="dataset_1")

    def test_dataset_name_exists__own_not_existing(self):
        assert not self.api_workflow_client.dataset_name_exists(
            dataset_name="not_existing_dataset"
        )

    def test_dataset_exists__raises_error(self):
        """Errors other than 404 from the API are propagated by dataset_exists."""
        with self.assertRaises(ApiException) as e:
            self.api_workflow_client.dataset_exists(dataset_id=None)
        # Fix: the raised exception lives on the context manager's `exception`
        # attribute; the previous `e.status` raised AttributeError instead of
        # checking the HTTP status code.
        assert e.exception.status != 404

    def test_dataset_name_exists__own_existing(self):
        assert self.api_workflow_client.dataset_name_exists(dataset_name="dataset_1")

    def test_dataset_name_exists__shared_existing(self):
        assert self.api_workflow_client.dataset_name_exists(
            dataset_name="shared_dataset_1", shared=True
        )

    def test_dataset_name_exists__shared_not_existing(self):
        assert not self.api_workflow_client.dataset_name_exists(
            dataset_name="not_existing_dataset", shared=True
        )

    def test_dataset_name_exists__own_and_shared_existing(self):
        # shared=None considers both the user's own and the shared datasets.
        assert self.api_workflow_client.dataset_name_exists(
            dataset_name="dataset_1", shared=None
        )
        assert self.api_workflow_client.dataset_name_exists(
            dataset_name="shared_dataset_1", shared=None
        )

    def test_dataset_name_exists__own_and_shared_not_existing(self):
        assert not self.api_workflow_client.dataset_name_exists(
            dataset_name="not_existing_dataset", shared=None
        )

    def test_get_datasets_by_name__own_not_existing(self):
        # A shared dataset name must not be found when only own datasets are queried.
        datasets = self.api_workflow_client.get_datasets_by_name(
            dataset_name="shared_dataset_1", shared=False
        )
        assert datasets == []

    def test_get_datasets_by_name__own_existing(self):
        datasets = self.api_workflow_client.get_datasets_by_name(
            dataset_name="dataset_1", shared=False
        )
        assert all(dataset.name == "dataset_1" for dataset in datasets)
        assert len(datasets) == 1

    def test_get_datasets_by_name__shared_not_existing(self):
        # An own dataset name must not be found when only shared datasets are queried.
        datasets = self.api_workflow_client.get_datasets_by_name(
            dataset_name="dataset_1", shared=True
        )
        assert datasets == []

    def test_get_datasets_by_name__shared_existing(self):
        datasets = self.api_workflow_client.get_datasets_by_name(
            dataset_name="shared_dataset_1", shared=True
        )
        assert all(dataset.name == "shared_dataset_1" for dataset in datasets)
        assert len(datasets) == 1

    def test_get_datasets_by_name__own_and_shared_not_existing(self):
        datasets = self.api_workflow_client.get_datasets_by_name(
            dataset_name="not_existing_dataset", shared=None
        )
        assert datasets == []

    def test_get_datasets_by_name__own_and_shared_existing(self):
        datasets = self.api_workflow_client.get_datasets_by_name(
            dataset_name="dataset_1", shared=None
        )
        assert all(dataset.name == "dataset_1" for dataset in datasets)
        assert len(datasets) == 1
        datasets = self.api_workflow_client.get_datasets_by_name(
            dataset_name="shared_dataset_1", shared=True
        )
        assert all(dataset.name == "shared_dataset_1" for dataset in datasets)
        assert len(datasets) == 1

    def test_get_all_datasets(self):
        """get_all_datasets returns own and shared datasets together."""
        datasets = self.api_workflow_client.get_all_datasets()
        dataset_names = {dataset.name for dataset in datasets}
        assert "dataset_1" in dataset_names
        assert "shared_dataset_1" in dataset_names
def test_create_new_dataset_with_unique_name__new_name(mocker: MockerFixture) -> None:
    """If the basename is unused, the dataset is created under the basename itself."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "dataset_name_exists", return_value=False)
    mocked_create_dataset = mocker.patch.object(
        ApiWorkflowClient, "_create_dataset_without_check_existing"
    )

    dataset_name = "dataset-name"
    dataset_type = DatasetType.IMAGES
    client = ApiWorkflowClient()
    client.create_new_dataset_with_unique_name(
        dataset_basename=dataset_name, dataset_type=dataset_type
    )
    mocked_create_dataset.assert_called_once_with(
        dataset_name=dataset_name,
        dataset_type=dataset_type,
    )
def test_create_new_dataset_with_unique_name__name_exists(
    mocker: MockerFixture,
) -> None:
    """If the basename is taken, a numeric suffix is appended to make it unique."""
    datasets = _get_datasets(1)
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "dataset_name_exists", return_value=True)
    mocked_create_dataset = mocker.patch.object(
        ApiWorkflowClient, "_create_dataset_without_check_existing"
    )
    mocked_datasets_api = mocker.MagicMock()

    dataset_name = datasets[0].name
    dataset_type = datasets[0].type
    # One existing dataset with the basename -> suffix "_1" is used.
    actual_dataset_name = f"{dataset_name}_1"

    client = ApiWorkflowClient()
    client._datasets_api = mocked_datasets_api
    client.create_new_dataset_with_unique_name(
        dataset_basename=dataset_name, dataset_type=dataset_type
    )
    mocked_datasets_api.get_datasets_query_by_name.assert_called_once_with(
        dataset_name=dataset_name,
        exact=False,
        shared=False,
        page_offset=0,
        page_size=5000,
    )
    mocked_create_dataset.assert_called_once_with(
        dataset_name=actual_dataset_name,
        dataset_type=dataset_type,
    )
def test_dataset_exists(mocker: MockerFixture) -> None:
    """dataset_exists returns True when get_dataset_by_id succeeds."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocked_get_dataset = mocker.patch.object(ApiWorkflowClient, "get_dataset_by_id")
    dataset_id = "dataset-id"
    client = ApiWorkflowClient()
    assert client.dataset_exists(dataset_id)
    mocked_get_dataset.assert_called_once_with(dataset_id)
def test_dataset_exists__not_found(mocker: MockerFixture) -> None:
    """A 404 ApiException from the backend maps to dataset_exists() == False."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient, "get_dataset_by_id", side_effect=ApiException(status=404)
    )
    client = ApiWorkflowClient()
    assert not client.dataset_exists("foo")
def test_dataset_exists__error(mocker: MockerFixture) -> None:
    """Errors other than a 404 ApiException are propagated to the caller."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient, "get_dataset_by_id", side_effect=RuntimeError("some error")
    )
    client = ApiWorkflowClient()
    with pytest.raises(RuntimeError) as exception:
        client.dataset_exists("foo")
        assert str(exception.value) == "some error"
def test_dataset_type(mocker: MockerFixture) -> None:
    """The dataset_type property reflects the type of the current dataset."""
    dataset = _get_datasets(1)[0]
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "_get_current_dataset", return_value=dataset)
    client = ApiWorkflowClient()
    assert client.dataset_type == dataset.type
def test_delete_dataset(mocker: MockerFixture) -> None:
    """Deleting a dataset calls the API and clears the client's cached dataset id."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mock_datasets_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._dataset_id = "foo"
    client._datasets_api = mock_datasets_api
    client.delete_dataset_by_id("foobar")
    mock_datasets_api.delete_dataset_by_id.assert_called_once_with(dataset_id="foobar")
    # The cached dataset id is removed even when a different dataset was deleted.
    assert not hasattr(client, "_dataset_id")
def test_get_datasets__shared(mocker: MockerFixture) -> None:
    """shared=True queries shared and team datasets and deduplicates the result."""
    datasets = _get_datasets(2)
    # Returns the same set of datasets twice. API client should remove duplicates
    mocked_pagination = mocker.patch.object(
        api_workflow_datasets.utils,
        "paginate_endpoint",
        side_effect=[datasets, datasets],
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mock_datasets_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._datasets_api = mock_datasets_api
    datasets = client.get_datasets(shared=True)
    unique_dataset_ids = set([dataset.id for dataset in datasets])
    assert len(unique_dataset_ids) == len(datasets)
    # Two paginated queries: one for shared datasets, one for team assets.
    assert mocked_pagination.call_count == 2
    call_args = mocked_pagination.call_args_list
    assert call_args[0][0] == (mock_datasets_api.get_datasets,)
    assert call_args[0][1] == {"shared": True}
    assert call_args[1][0] == (mock_datasets_api.get_datasets,)
    assert call_args[1][1] == {"get_assets_of_team": True}
def test_get_datasets__not_shared(mocker: MockerFixture) -> None:
    """shared=False issues exactly one paginated query for the user's own datasets."""
    mocked_pagination = mocker.patch.object(
        api_workflow_datasets.utils, "paginate_endpoint"
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mock_datasets_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._datasets_api = mock_datasets_api
    client.get_datasets(shared=False)
    mocked_pagination.assert_called_once_with(
        mock_datasets_api.get_datasets, shared=False
    )
def test_get_datasets__shared_None(mocker: MockerFixture) -> None:
    """shared=None queries own, shared, and team datasets (three paginated calls)."""
    mocked_pagination = mocker.patch.object(
        api_workflow_datasets.utils, "paginate_endpoint"
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mock_datasets_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._datasets_api = mock_datasets_api
    client.get_datasets(shared=None)
    assert mocked_pagination.call_count == 3
def test_get_datasets_by_name__not_shared__paginated(mocker: MockerFixture) -> None:
    """Name lookup with shared=False issues one exact-match paginated query."""
    datasets = _get_datasets(3)
    # Returns the same set of datasets twice. API client should remove duplicates.
    mocked_paginate_endpoint = mocker.patch.object(
        api_workflow_datasets.utils,
        "paginate_endpoint",
        # There's one call to paginate_endpoint.
        # It returns a paginated list of datasets.
        return_value=iter([datasets[0], datasets[1]]),
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mock_datasets_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._datasets_api = mock_datasets_api
    # Note: because the `dataset_name` filtering is mocked away in this test,
    # the `dataset_name` passed as argument and in the returned dataset are independent.
    datasets_not_shared = client.get_datasets_by_name(
        shared=False, dataset_name="some_random_dataset_name"
    )
    assert datasets_not_shared == [datasets[0], datasets[1]]
    mocked_paginate_endpoint.assert_called_once_with(
        mock_datasets_api.get_datasets_query_by_name,
        dataset_name="some_random_dataset_name",
        exact=True,
        shared=False,
    )
def test_get_datasets_by_name__shared__paginated(mocker: MockerFixture) -> None:
    """Name lookup with shared=True queries shared datasets and team assets."""
    datasets = _get_datasets(3)
    # Returns the same set of datasets twice. API client should remove duplicates.
    mocked_paginate_endpoint = mocker.patch.object(
        api_workflow_datasets.utils,
        "paginate_endpoint",
        side_effect=[
            # There are two calls to paginate_endpoint to get all the team's datasets.
            iter([datasets[2]]),
            iter([]),
        ],
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mock_datasets_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._datasets_api = mock_datasets_api
    # Note: because the `dataset_name` filtering is mocked away in this test,
    # the `dataset_name` passed as argument and in the returned dataset are independent.
    datasets_shared = client.get_datasets_by_name(
        shared=True, dataset_name="some_random_dataset_name"
    )
    assert datasets_shared == [datasets[2]]
    mocked_paginate_endpoint.assert_has_calls(
        [
            mocker.call(
                mock_datasets_api.get_datasets_query_by_name,
                dataset_name="some_random_dataset_name",
                exact=True,
                shared=True,
            ),
            mocker.call(
                mock_datasets_api.get_datasets_query_by_name,
                dataset_name="some_random_dataset_name",
                exact=True,
                get_assets_of_team=True,
            ),
        ]
    )
def test_get_datasets_by_name__shared_None__paginated(mocker: MockerFixture) -> None:
    """Name lookup with shared=None combines own, shared, and team query results."""
    datasets = _get_datasets(3)
    # Returns the same set of datasets twice. API client should remove duplicates.
    mocked_paginate_endpoint = mocker.patch.object(
        api_workflow_datasets.utils,
        "paginate_endpoint",
        side_effect=[
            # There are three calls to paginate_endpoint. The first call
            # gets all the user's datasets. The second and third calls get
            # all the team's datasets.
            # The first call returns a paginated list of datasets.
            iter([datasets[0], datasets[1]]),
            iter([datasets[2]]),
            iter([]),
        ],
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mock_datasets_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._datasets_api = mock_datasets_api
    # Note: because the `dataset_name` filtering is mocked away in this test,
    # the `dataset_name` passed as argument and in the returned dataset are independent.
    datasets_shared_none = client.get_datasets_by_name(
        shared=None, dataset_name="some_random_dataset_name"
    )
    assert datasets_shared_none == [datasets[0], datasets[1], datasets[2]]
    mocked_paginate_endpoint.assert_has_calls(
        [
            mocker.call(
                mock_datasets_api.get_datasets_query_by_name,
                dataset_name="some_random_dataset_name",
                exact=True,
                shared=False,
            ),
            mocker.call(
                mock_datasets_api.get_datasets_query_by_name,
                dataset_name="some_random_dataset_name",
                exact=True,
                shared=True,
            ),
            mocker.call(
                mock_datasets_api.get_datasets_query_by_name,
                dataset_name="some_random_dataset_name",
                exact=True,
                get_assets_of_team=True,
            ),
        ]
    )
def test_set_dataset_id__error(mocker: MockerFixture):
    """set_dataset_id_by_name raises ValueError when no dataset with the name exists."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "get_datasets_by_name", return_value=[])
    client = ApiWorkflowClient()
    with pytest.raises(ValueError) as exception:
        client.set_dataset_id_by_name("dataset_1")
        assert str(exception.value) == (
            "A dataset with the name 'dataset_1' does not exist on the "
            "Lightly Platform. Please create it first."
        )
def test_set_dataset_id__warning_not_shared(mocker: MockerFixture) -> None:
    """With multiple matches, the first dataset is used and a warning is emitted."""
    datasets = _get_datasets(2)
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient, "get_datasets_by_name", return_value=datasets
    )
    mocked_warn = mocker.patch("warnings.warn")
    client = ApiWorkflowClient()
    dataset_name = datasets[0].name
    dataset_id = datasets[0].id
    client.set_dataset_id_by_name(dataset_name, shared=False)
    assert client.dataset_id == dataset_id
    mocked_warn.assert_called_once_with(
        f"Found 2 datasets with the name '{dataset_name}'. Their "
        f"ids are {[dataset.id for dataset in datasets]}. "
        f"The dataset_id of the client was set to '{dataset_id}'. "
    )
def test_set_dataset_id__warning_shared(mocker: MockerFixture) -> None:
    """With shared=True and multiple matches, the warning mentions shared datasets."""
    datasets = _get_datasets(2)
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient, "get_datasets_by_name", return_value=datasets
    )
    mocked_warn = mocker.patch("warnings.warn")
    client = ApiWorkflowClient()
    dataset_name = datasets[0].name
    dataset_id = datasets[0].id
    client.set_dataset_id_by_name(dataset_name, shared=True)
    assert client.dataset_id == dataset_id
    mocked_warn.assert_called_once_with(
        f"Found 2 datasets with the name '{dataset_name}'. Their "
        f"ids are {[dataset.id for dataset in datasets]}. "
        f"The dataset_id of the client was set to '{dataset_id}'. "
        "We noticed that you set shared=True which also retrieves "
        "datasets shared with you. Set shared=False to only consider "
        "datasets you own."
    )
def test_set_dataset_id__success(mocker: MockerFixture) -> None:
    """A single name match sets the client's dataset id without warnings."""
    datasets = _get_datasets(1)
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient, "get_datasets_by_name", return_value=datasets
    )
    client = ApiWorkflowClient()
    client.set_dataset_id_by_name(datasets[0].name)
    assert client.dataset_id == datasets[0].id
def test_create_dataset(mocker: MockerFixture) -> None:
    """create_dataset sends a DatasetCreateRequest with name, type, and creator."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    client = ApiWorkflowClient()
    client._creator = Creator.USER_PIP
    client._datasets_api = mocker.create_autospec(DatasetsApi)
    client.create_dataset(dataset_name="name")
    expected_body = DatasetCreateRequest(
        name="name", type=DatasetType.IMAGES, creator=Creator.USER_PIP
    )
    client._datasets_api.create_dataset.assert_called_once_with(
        dataset_create_request=expected_body
    )
def test_create_dataset__error(mocker: MockerFixture) -> None:
    """create_dataset raises ValueError when the dataset name already exists."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "dataset_name_exists", return_value=True)
    client = ApiWorkflowClient()
    with pytest.raises(ValueError) as exception:
        client.create_dataset(dataset_name="name")
        assert str(exception.value) == (
            "A dataset with the name 'name' already exists! Please use "
            "the `set_dataset_id_by_name()` method instead if you intend to reuse "
            "an existing dataset."
        )
| 19,231 | 37.931174 | 88 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_datasources.py | import pytest
from pytest_mock import MockerFixture
from lightly.api import ApiWorkflowClient
from lightly.openapi_generated.swagger_client.models import (
DatasourceConfigAzure,
DatasourceConfigGCS,
DatasourceConfigLOCAL,
DatasourceConfigS3,
DatasourceConfigS3DelegatedAccess,
DatasourceRawSamplesDataRow,
)
from lightly.openapi_generated.swagger_client.models.datasource_config_verify_data import (
DatasourceConfigVerifyData,
)
from lightly.openapi_generated.swagger_client.models.datasource_config_verify_data_errors import (
DatasourceConfigVerifyDataErrors,
)
def test__download_raw_files(mocker: MockerFixture) -> None:
    """_download_raw_files warns about absolute paths, dot notation, and duplicate
    filenames, and keeps only the valid, deduplicated entries."""
    mock_response_1 = mocker.MagicMock()
    mock_response_1.has_more = True
    mock_response_1.data = [
        DatasourceRawSamplesDataRow(file_name="/file1", read_url="url1"),
        DatasourceRawSamplesDataRow(file_name="file2", read_url="url2"),
    ]

    mock_response_2 = mocker.MagicMock()
    mock_response_2.has_more = False
    mock_response_2.data = [
        DatasourceRawSamplesDataRow(file_name="./file3", read_url="url3"),
        DatasourceRawSamplesDataRow(file_name="file2", read_url="url2"),
    ]

    # Two pages: has_more=True on the first keeps the download loop going.
    mocked_method = mocker.MagicMock(side_effect=[mock_response_1, mock_response_2])
    mocked_pbar = mocker.MagicMock()
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocked_warning = mocker.patch("warnings.warn")
    client = ApiWorkflowClient()
    client._dataset_id = "dataset-id"
    result = client._download_raw_files(
        download_function=mocked_method,
        progress_bar=mocked_pbar,
    )
    kwargs = mocked_method.call_args[1]
    assert "relevant_filenames_file_name" not in kwargs
    # The progress bar advances once per page.
    assert mocked_pbar.update.call_count == 2
    assert mocked_warning.call_count == 3
    warning_text = [str(call_args[0][0]) for call_args in mocked_warning.call_args_list]
    assert warning_text == [
        (
            "Absolute file paths like /file1 are not supported"
            " in relevant filenames file None due to blob storage"
        ),
        (
            "Using dot notation ('./', '../') like in ./file3 is not supported"
            " in relevant filenames file None due to blob storage"
        ),
        ("Duplicate filename file2 in relevant filenames file None"),
    ]
    # Only the first occurrence of the valid relative filename survives.
    assert len(result) == 1
    assert result[0][0] == "file2"
def test_get_prediction_read_url(mocker: MockerFixture) -> None:
    """get_prediction_read_url forwards dataset id and file name to the API."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocked_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._dataset_id = "dataset-id"
    client._datasources_api = mocked_api
    client.get_prediction_read_url("test.json")
    mocked_method = (
        mocked_api.get_prediction_file_read_url_from_datasource_by_dataset_id
    )
    mocked_method.assert_called_once_with(
        dataset_id="dataset-id", file_name="test.json"
    )
def test_download_new_raw_samples(mocker: MockerFixture) -> None:
    """Samples are fetched from one past the last processed timestamp up to now."""
    last_processed = 2
    now = 5
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient, "get_processed_until_timestamp", return_value=last_processed
    )
    mocker.patch("time.time", return_value=now)
    download_mock = mocker.patch.object(ApiWorkflowClient, "download_raw_samples")
    update_mock = mocker.patch.object(
        ApiWorkflowClient, "update_processed_until_timestamp"
    )
    workflow_client = ApiWorkflowClient()
    workflow_client.download_new_raw_samples()
    download_mock.assert_called_once_with(
        from_=last_processed + 1,
        to=now,
        relevant_filenames_file_name=None,
        use_redirected_read_url=False,
    )
    update_mock.assert_called_once_with(timestamp=now)
def test_download_new_raw_samples__from_beginning(mocker: MockerFixture) -> None:
    """With no processed timestamp recorded, the download starts at zero."""
    now = 5
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient, "get_processed_until_timestamp", return_value=0
    )
    mocker.patch("time.time", return_value=now)
    download_mock = mocker.patch.object(ApiWorkflowClient, "download_raw_samples")
    update_mock = mocker.patch.object(
        ApiWorkflowClient, "update_processed_until_timestamp"
    )
    workflow_client = ApiWorkflowClient()
    workflow_client.download_new_raw_samples()
    download_mock.assert_called_once_with(
        from_=0,
        to=now,
        relevant_filenames_file_name=None,
        use_redirected_read_url=False,
    )
    update_mock.assert_called_once_with(timestamp=now)
def test_download_raw_samples_predictions__relevant_filenames_artifact_id(
    mocker: MockerFixture,
) -> None:
    """Run id and artifact id are forwarded; giving only one of them raises."""
    mock_response = mocker.MagicMock()
    mock_response.has_more = False
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocked_api = mocker.MagicMock()
    mocked_method = mocker.MagicMock(return_value=mock_response)
    mocked_api.get_list_of_raw_samples_predictions_from_datasource_by_dataset_id = (
        mocked_method
    )
    client = ApiWorkflowClient()
    client._dataset_id = "dataset-id"
    client._datasources_api = mocked_api
    client.download_raw_predictions(
        task_name="task", run_id="foo", relevant_filenames_artifact_id="bar"
    )
    kwargs = mocked_method.call_args[1]
    assert kwargs.get("relevant_filenames_run_id") == "foo"
    assert kwargs.get("relevant_filenames_artifact_id") == "bar"
    # should raise ValueError when only run_id is given
    with pytest.raises(ValueError):
        client.download_raw_predictions(task_name="foobar", run_id="foo")
    # should raise ValueError when only relevant_filenames_artifact_id is given
    with pytest.raises(ValueError):
        client.download_raw_predictions(
            task_name="foobar", relevant_filenames_artifact_id="bar"
        )
def test_download_raw_samples_metadata__relevant_filenames_artifact_id(
    mocker: MockerFixture,
) -> None:
    """Run id and artifact id are forwarded; giving only one of them raises."""
    mock_response = mocker.MagicMock()
    mock_response.has_more = False
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocked_api = mocker.MagicMock()
    mocked_method = mocker.MagicMock(return_value=mock_response)
    mocked_api.get_list_of_raw_samples_metadata_from_datasource_by_dataset_id = (
        mocked_method
    )
    client = ApiWorkflowClient()
    client._dataset_id = "dataset-id"
    client._datasources_api = mocked_api
    client.download_raw_metadata(run_id="foo", relevant_filenames_artifact_id="bar")
    kwargs = mocked_method.call_args[1]
    assert kwargs.get("relevant_filenames_run_id") == "foo"
    assert kwargs.get("relevant_filenames_artifact_id") == "bar"
    # should raise ValueError when only run_id is given
    with pytest.raises(ValueError):
        client.download_raw_metadata(run_id="foo")
    # should raise ValueError when only relevant_filenames_artifact_id is given
    with pytest.raises(ValueError):
        client.download_raw_metadata(relevant_filenames_artifact_id="bar")
def test_get_processed_until_timestamp(mocker: MockerFixture) -> None:
    """The query is delegated to the datasources API with the client's dataset id."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    datasources_api = mocker.MagicMock()
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    workflow_client._datasources_api = datasources_api
    workflow_client.get_processed_until_timestamp()
    datasources_api.get_datasource_processed_until_timestamp_by_dataset_id.assert_called_once_with(
        dataset_id="dataset-id"
    )
def test_set_azure_config(mocker: MockerFixture) -> None:
    """set_azure_config sends a DatasourceConfigAzure to the datasources API."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    datasources_api = mocker.MagicMock()
    workflow_client = ApiWorkflowClient()
    workflow_client._datasources_api = datasources_api
    workflow_client._dataset_id = "dataset-id"
    workflow_client.set_azure_config(
        container_name="my-container/name",
        account_name="my-account-name",
        sas_token="my-sas-token",
        thumbnail_suffix=".lightly/thumbnails/[filename]-thumb-[extension]",
    )
    call_kwargs = datasources_api.update_datasource_by_dataset_id.call_args[1]
    config = call_kwargs["datasource_config"].actual_instance
    assert isinstance(config, DatasourceConfigAzure)
def test_set_gcs_config(mocker: MockerFixture) -> None:
    """set_gcs_config sends a DatasourceConfigGCS to the datasources API."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    datasources_api = mocker.MagicMock()
    workflow_client = ApiWorkflowClient()
    workflow_client._datasources_api = datasources_api
    workflow_client._dataset_id = "dataset-id"
    workflow_client.set_gcs_config(
        resource_path="gs://my-bucket/my-dataset",
        project_id="my-project-id",
        credentials="my-credentials",
        thumbnail_suffix=".lightly/thumbnails/[filename]-thumb-[extension]",
    )
    call_kwargs = datasources_api.update_datasource_by_dataset_id.call_args[1]
    config = call_kwargs["datasource_config"].actual_instance
    assert isinstance(config, DatasourceConfigGCS)
def test_set_local_config(mocker: MockerFixture) -> None:
    """set_local_config sends a DatasourceConfigLOCAL to the datasources API."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    datasources_api = mocker.MagicMock()
    workflow_client = ApiWorkflowClient()
    workflow_client._datasources_api = datasources_api
    workflow_client._dataset_id = "dataset-id"
    workflow_client.set_local_config(
        resource_path="http://localhost:1234/path/to/my/data",
        thumbnail_suffix=".lightly/thumbnails/[filename]-thumb-[extension]",
    )
    call_kwargs = datasources_api.update_datasource_by_dataset_id.call_args[1]
    config = call_kwargs["datasource_config"].actual_instance
    assert isinstance(config, DatasourceConfigLOCAL)
def test_set_s3_config(mocker: MockerFixture) -> None:
    """set_s3_config sends a DatasourceConfigS3 to the datasources API."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    datasources_api = mocker.MagicMock()
    workflow_client = ApiWorkflowClient()
    workflow_client._datasources_api = datasources_api
    workflow_client._dataset_id = "dataset-id"
    workflow_client.set_s3_config(
        resource_path="s3://my-bucket/my-dataset",
        thumbnail_suffix=".lightly/thumbnails/[filename]-thumb-[extension]",
        region="eu-central-1",
        access_key="my-access-key",
        secret_access_key="my-secret-access-key",
    )
    call_kwargs = datasources_api.update_datasource_by_dataset_id.call_args[1]
    config = call_kwargs["datasource_config"].actual_instance
    assert isinstance(config, DatasourceConfigS3)
def test_set_s3_delegated_access_config(mocker: MockerFixture) -> None:
    """set_s3_delegated_access_config sends a DatasourceConfigS3DelegatedAccess."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    datasources_api = mocker.MagicMock()
    workflow_client = ApiWorkflowClient()
    workflow_client._datasources_api = datasources_api
    workflow_client._dataset_id = "dataset-id"
    workflow_client.set_s3_delegated_access_config(
        resource_path="s3://my-bucket/my-dataset",
        thumbnail_suffix=".lightly/thumbnails/[filename]-thumb-[extension]",
        region="eu-central-1",
        role_arn="arn:aws:iam::000000000000:role.test",
        external_id="my-external-id",
    )
    call_kwargs = datasources_api.update_datasource_by_dataset_id.call_args[1]
    config = call_kwargs["datasource_config"].actual_instance
    assert isinstance(config, DatasourceConfigS3DelegatedAccess)
def test_update_processed_until_timestamp(mocker: MockerFixture) -> None:
    """The timestamp is wrapped in a request object for the datasources API."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    datasources_api = mocker.MagicMock()
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    workflow_client._datasources_api = datasources_api
    workflow_client.update_processed_until_timestamp(10)
    update_mock = (
        datasources_api.update_datasource_processed_until_timestamp_by_dataset_id
    )
    call_kwargs = update_mock.call_args[1]
    assert call_kwargs["dataset_id"] == "dataset-id"
    request = call_kwargs["datasource_processed_until_timestamp_request"]
    assert request.processed_until_timestamp == 10
def test_list_datasource_permissions(mocker: MockerFixture) -> None:
    """Permission flags from the API map onto snake_case keys; no errors key."""
    workflow_client = ApiWorkflowClient(token="abc")
    workflow_client._dataset_id = "dataset-id"
    verify_data = DatasourceConfigVerifyData(
        canRead=True,
        canWrite=True,
        canList=False,
        canOverwrite=True,
        errors=None,
    )
    workflow_client._datasources_api.verify_datasource_by_dataset_id = (
        mocker.MagicMock(return_value=verify_data)
    )
    expected = {
        "can_read": True,
        "can_write": True,
        "can_list": False,
        "can_overwrite": True,
    }
    assert workflow_client.list_datasource_permissions() == expected
def test_list_datasource_permissions__error(mocker: MockerFixture) -> None:
    """Per-permission error messages are surfaced under an 'errors' key."""
    workflow_client = ApiWorkflowClient(token="abc")
    workflow_client._dataset_id = "dataset-id"
    verify_data = DatasourceConfigVerifyData(
        canRead=True,
        canWrite=True,
        canList=False,
        canOverwrite=True,
        errors=DatasourceConfigVerifyDataErrors(
            canRead=None, canWrite=None, canList="error message", canOverwrite=None
        ),
    )
    workflow_client._datasources_api.verify_datasource_by_dataset_id = (
        mocker.MagicMock(return_value=verify_data)
    )
    expected = {
        "can_read": True,
        "can_write": True,
        "can_list": False,
        "can_overwrite": True,
        "errors": {
            "can_list": "error message",
        },
    }
    assert workflow_client.list_datasource_permissions() == expected
| 13,436 | 38.174927 | 104 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_download_dataset.py | import pytest
from pytest_mock import MockerFixture
from lightly.api import ApiWorkflowClient, api_workflow_download_dataset
from lightly.openapi_generated.swagger_client.models import (
DatasetData,
DatasetEmbeddingData,
DatasetType,
ImageType,
TagData,
)
from tests.api_workflow import utils
def test_download_dataset__no_image(mocker: MockerFixture) -> None:
    """Downloading fails when the dataset only holds metadata, not full images."""
    dataset_id = utils.generate_id()
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    datasets_api = mocker.MagicMock()
    datasets_api.get_dataset_by_id = mocker.MagicMock(
        return_value=DatasetData(
            name="dataset",
            id=dataset_id,
            user_id=utils.generate_id(),
            last_modified_at=0,
            type=DatasetType.IMAGES,
            img_type=ImageType.META,
            size_in_bytes=-1,
            n_samples=-1,
            created_at=0,
        )
    )
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = dataset_id
    workflow_client._datasets_api = datasets_api
    with pytest.raises(ValueError) as exc_info:
        workflow_client.download_dataset(output_dir="path/to/dir")
    assert (
        str(exc_info.value)
        == f"Dataset with id {dataset_id} has no downloadable images!"
    )
def test_download_dataset__tag_missing(mocker: MockerFixture) -> None:
    """Downloading with an unknown tag name raises a ValueError."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    datasets_api = mocker.MagicMock()
    datasets_api.get_dataset_by_id = mocker.MagicMock(
        return_value=DatasetData(
            name="dataset",
            id=utils.generate_id(),
            user_id=utils.generate_id(),
            last_modified_at=0,
            type=DatasetType.IMAGES,
            img_type=ImageType.FULL,
            size_in_bytes=-1,
            n_samples=-1,
            created_at=0,
        )
    )
    mocker.patch.object(ApiWorkflowClient, "get_all_tags", return_value=[])
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    workflow_client._datasets_api = datasets_api
    with pytest.raises(ValueError) as exc_info:
        workflow_client.download_dataset(output_dir="path/to/dir", tag_name="some-tag")
    assert str(exc_info.value) == "Dataset with id dataset-id has no tag some-tag!"
def test_download_dataset__ok(mocker: MockerFixture) -> None:
    """Failed image downloads produce warnings but the run still completes.

    The samples API raises on every read-url request; with the executor's
    ``map`` replaced by the builtin ``map``, downloads run sequentially and
    each failure is reported via ``warnings.warn``.
    """
    dataset_id = utils.generate_id()
    mocked_get_dataset_by_id = mocker.MagicMock(
        return_value=DatasetData(
            name="dataset",
            id=dataset_id,
            user_id=utils.generate_id(),
            last_modified_at=0,
            type=DatasetType.IMAGES,
            img_type=ImageType.FULL,
            size_in_bytes=-1,
            n_samples=-1,
            created_at=0,
        )
    )
    mocked_datasets_api = mocker.MagicMock()
    mocked_datasets_api.get_dataset_by_id = mocked_get_dataset_by_id
    mocked_get_sample_mappings_by_dataset_id = mocker.MagicMock(return_value=[1])
    mocked_mappings_api = mocker.MagicMock()
    mocked_mappings_api.get_sample_mappings_by_dataset_id = (
        mocked_get_sample_mappings_by_dataset_id
    )
    # Every read-url request fails, so every download attempt warns.
    mocked_get_sample_image_read_url_by_id = mocker.MagicMock(
        side_effect=RuntimeError("some error")
    )
    mocked_samples_api = mocker.MagicMock()
    mocked_samples_api.get_sample_image_read_url_by_id = (
        mocked_get_sample_image_read_url_by_id
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient,
        "get_all_tags",
        return_value=[
            TagData(
                id=utils.generate_id(),
                dataset_id=dataset_id,
                prev_tag_id=None,
                bit_mask_data="0x1",
                name="some-tag",
                tot_size=4,
                created_at=1577836800,
                changes=[],
            )
        ],
    )
    mocker.patch.object(
        ApiWorkflowClient, "get_filenames", return_value=[f"file{i}" for i in range(3)]
    )
    mocker.patch.object(api_workflow_download_dataset, "_get_image_from_read_url")
    mocker.patch.object(api_workflow_download_dataset, "_make_dir_and_save_image")
    mocked_warning = mocker.patch("warnings.warn")
    mocker.patch("tqdm.tqdm")
    # Replace the thread pool's map with the builtin map so the download
    # runs sequentially and deterministically in the test.
    mocked_executor = mocker.patch.object(
        api_workflow_download_dataset, "ThreadPoolExecutor"
    )
    mocked_executor.return_value.__enter__.return_value.map = (
        lambda fn, iterables, **_: map(fn, iterables)
    )
    client = ApiWorkflowClient()
    client._dataset_id = "dataset-id"
    client._datasets_api = mocked_datasets_api
    client._mappings_api = mocked_mappings_api
    client._samples_api = mocked_samples_api
    client.download_dataset(output_dir="path/to/dir", tag_name="some-tag")
    assert mocked_warning.call_count == 2
    warning_text = [str(call_args[0][0]) for call_args in mocked_warning.call_args_list]
    assert warning_text == [
        "Downloading of image file0 failed with error some error",
        "Warning: Unsuccessful download! Failed at image: 0",
    ]
def test_get_embedding_data_by_name(mocker: MockerFixture) -> None:
    """The embedding whose name matches the query is returned."""
    first = DatasetEmbeddingData(
        id=utils.generate_id(),
        name="embedding_0",
        created_at=0,
        is_processed=False,
    )
    second = DatasetEmbeddingData(
        id=utils.generate_id(),
        name="embedding_1",
        created_at=1,
        is_processed=False,
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient,
        "get_all_embedding_data",
        return_value=[first, second],
    )
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    assert workflow_client.get_embedding_data_by_name(name="embedding_0") == first
def test_get_embedding_data_by_name__no_embedding_with_name(
    mocker: MockerFixture,
) -> None:
    """Looking up an unknown embedding name raises a ValueError."""
    existing = DatasetEmbeddingData(
        id=utils.generate_id(),
        name="embedding",
        created_at=0,
        is_processed=False,
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient, "get_all_embedding_data", return_value=[existing]
    )
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    with pytest.raises(ValueError) as exc_info:
        workflow_client.get_embedding_data_by_name(name="other_embedding")
    assert str(exc_info.value) == (
        "There are no embeddings with name 'other_embedding' "
        "for dataset with id 'dataset-id'."
    )
def test_download_embeddings_csv_by_id(mocker: MockerFixture) -> None:
    """The read URL is fetched for the embedding id and the CSV written locally."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    download_mock = mocker.patch.object(
        api_workflow_download_dataset.download, "download_and_write_file"
    )
    embeddings_api = mocker.MagicMock()
    read_url_mock = mocker.MagicMock(return_value="read_url")
    embeddings_api.get_embeddings_csv_read_url_by_id = read_url_mock
    mocker.patch.object(
        api_workflow_download_dataset,
        "_get_latest_default_embedding_data",
        return_value=None,
    )
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    workflow_client._embeddings_api = embeddings_api
    workflow_client.download_embeddings_csv_by_id(
        embedding_id="embedding_id",
        output_path="embeddings.csv",
    )
    read_url_mock.assert_called_once_with(
        dataset_id="dataset-id",
        embedding_id="embedding_id",
    )
    download_mock.assert_called_once_with(
        url="read_url",
        output_path="embeddings.csv",
    )
def test_download_embeddings_csv(mocker: MockerFixture) -> None:
    """The latest default embedding is resolved and then downloaded by id."""
    embedding_id = utils.generate_id()
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    latest_mock = mocker.patch.object(
        api_workflow_download_dataset,
        "_get_latest_default_embedding_data",
        return_value=DatasetEmbeddingData(
            id=embedding_id,
            name="default_20221209_10h45m49s",
            created_at=0,
            is_processed=False,
        ),
    )
    mocker.patch.object(ApiWorkflowClient, "get_all_embedding_data")
    by_id_mock = mocker.patch.object(
        ApiWorkflowClient,
        "download_embeddings_csv_by_id",
    )
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    workflow_client.download_embeddings_csv(output_path="embeddings.csv")
    latest_mock.assert_called_once()
    by_id_mock.assert_called_once_with(
        embedding_id=embedding_id,
        output_path="embeddings.csv",
    )
def test_download_embeddings_csv__no_default_embedding(mocker: MockerFixture) -> None:
    """A missing default embedding results in a RuntimeError."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    get_all_mock = mocker.patch.object(
        ApiWorkflowClient, "get_all_embedding_data", return_value=[]
    )
    mocker.patch.object(
        api_workflow_download_dataset,
        "_get_latest_default_embedding_data",
        return_value=None,
    )
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    with pytest.raises(RuntimeError) as exc_info:
        workflow_client.download_embeddings_csv(output_path="embeddings.csv")
    assert (
        str(exc_info.value)
        == "Could not find embeddings for dataset with id 'dataset-id'."
    )
    get_all_mock.assert_called_once()
def test__get_latest_default_embedding_data__no_default_embedding() -> None:
    """Embeddings with custom names are not treated as default embeddings."""
    custom = DatasetEmbeddingData(
        id=utils.generate_id(),
        name="custom-name",
        created_at=0,
        is_processed=False,
    )
    result = api_workflow_download_dataset._get_latest_default_embedding_data(
        embeddings=[custom]
    )
    assert result is None
| 10,120 | 33.42517 | 88 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_export.py | from pytest_mock import MockerFixture
from lightly.api import ApiWorkflowClient, api_workflow_export
from lightly.api import utils as api_utils
from lightly.openapi_generated.swagger_client.models import FileNameFormat, TagData
from tests.api_workflow import utils
def _get_tag(dataset_id: str, tag_name: str) -> TagData:
    """Build a minimal tag fixture belonging to the given dataset."""
    tag = TagData(
        id=utils.generate_id(),
        dataset_id=dataset_id,
        prev_tag_id=None,
        bit_mask_data="0x1",
        name=tag_name,
        tot_size=4,
        created_at=1577836800,
        changes=[],
    )
    return tag
def test_export_filenames_by_tag_id(mocker: MockerFixture) -> None:
    """A single page of filenames is returned unchanged."""
    dataset_id = utils.generate_id()
    paginate_mock = mocker.patch.object(
        api_utils,
        "paginate_endpoint",
        side_effect=[iter(["file0\nfile1"])],
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    tags_api = mocker.MagicMock()
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = dataset_id
    workflow_client._tags_api = tags_api
    assert workflow_client.export_filenames_by_tag_id(tag_id="tag_id") == "file0\nfile1"
    paginate_mock.assert_called_once_with(
        tags_api.export_tag_to_basic_filenames,
        dataset_id=dataset_id,
        tag_id="tag_id",
    )
def test_export_filenames_by_tag_id__two_pages(mocker: MockerFixture) -> None:
    """Filename pages are joined with a newline between them."""
    dataset_id = utils.generate_id()
    # Simulate two pages from the paginated endpoint.
    paginate_mock = mocker.patch.object(
        api_utils,
        "paginate_endpoint",
        side_effect=[iter(["file0\nfile1", "file2\nfile3"])],
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    tags_api = mocker.MagicMock()
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = dataset_id
    workflow_client._tags_api = tags_api
    assert (
        workflow_client.export_filenames_by_tag_id(tag_id="tag_id")
        == "file0\nfile1\nfile2\nfile3"
    )
    paginate_mock.assert_called_once_with(
        tags_api.export_tag_to_basic_filenames,
        dataset_id=dataset_id,
        tag_id="tag_id",
    )
def test_export_filenames_and_read_urls_by_tag_id(mocker: MockerFixture) -> None:
    """Filenames, read URLs and datasource URLs are zipped into one dict per file.

    The paginated endpoint is called three times, once per file-name format,
    in the order NAME, REDIRECTED_READ_URL, DATASOURCE_FULL.
    """
    dataset_id = utils.generate_id()
    mocked_paginate = mocker.patch.object(
        api_utils,
        "paginate_endpoint",
        side_effect=[
            iter(["file0\nfile1"]),
            iter(["read_url0\nread_url1"]),
            iter(["datasource_url0\ndatasource_url1"]),
        ],
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocked_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._dataset_id = dataset_id
    client._tags_api = mocked_api
    data = client.export_filenames_and_read_urls_by_tag_id(tag_id="tag_id")
    assert data == [
        {
            "fileName": "file0",
            "readUrl": "read_url0",
            "datasourceUrl": "datasource_url0",
        },
        {
            "fileName": "file1",
            "readUrl": "read_url1",
            "datasourceUrl": "datasource_url1",
        },
    ]
    mocked_paginate.assert_has_calls(
        [
            mocker.call(
                mocked_api.export_tag_to_basic_filenames,
                dataset_id=dataset_id,
                tag_id="tag_id",
                file_name_format=FileNameFormat.NAME,
            ),
            mocker.call(
                mocked_api.export_tag_to_basic_filenames,
                dataset_id=dataset_id,
                tag_id="tag_id",
                file_name_format=FileNameFormat.REDIRECTED_READ_URL,
            ),
            mocker.call(
                mocked_api.export_tag_to_basic_filenames,
                dataset_id=dataset_id,
                tag_id="tag_id",
                file_name_format=FileNameFormat.DATASOURCE_FULL,
            ),
        ]
    )
def test_export_filenames_and_read_urls_by_tag_id__two_pages(
    mocker: MockerFixture,
) -> None:
    """Multi-page responses per format are concatenated before being zipped.

    Each of the three format requests returns two pages; the result still
    pairs filename, read URL and datasource URL by position.
    """
    dataset_id = utils.generate_id()
    mocked_paginate = mocker.patch.object(
        api_utils,
        "paginate_endpoint",
        side_effect=[
            # Simulate two pages.
            iter(["file0\nfile1", "file2\nfile3"]),
            iter(["read_url0\nread_url1", "read_url2\nread_url3"]),
            iter(
                ["datasource_url0\ndatasource_url1", "datasource_url2\ndatasource_url3"]
            ),
        ],
    )
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocked_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._dataset_id = dataset_id
    client._tags_api = mocked_api
    data = client.export_filenames_and_read_urls_by_tag_id(tag_id="tag_id")
    assert data == [
        {
            "fileName": "file0",
            "readUrl": "read_url0",
            "datasourceUrl": "datasource_url0",
        },
        {
            "fileName": "file1",
            "readUrl": "read_url1",
            "datasourceUrl": "datasource_url1",
        },
        {
            "fileName": "file2",
            "readUrl": "read_url2",
            "datasourceUrl": "datasource_url2",
        },
        {
            "fileName": "file3",
            "readUrl": "read_url3",
            "datasourceUrl": "datasource_url3",
        },
    ]
    mocked_paginate.assert_has_calls(
        [
            mocker.call(
                mocked_api.export_tag_to_basic_filenames,
                dataset_id=dataset_id,
                tag_id="tag_id",
                file_name_format=FileNameFormat.NAME,
            ),
            mocker.call(
                mocked_api.export_tag_to_basic_filenames,
                dataset_id=dataset_id,
                tag_id="tag_id",
                file_name_format=FileNameFormat.REDIRECTED_READ_URL,
            ),
            mocker.call(
                mocked_api.export_tag_to_basic_filenames,
                dataset_id=dataset_id,
                tag_id="tag_id",
                file_name_format=FileNameFormat.DATASOURCE_FULL,
            ),
        ]
    )
def test_export_filenames_by_tag_name(mocker: MockerFixture) -> None:
    """The tag is resolved by name and its id forwarded to the id-based export."""
    dataset_id = utils.generate_id()
    tag = _get_tag(dataset_id=dataset_id, tag_name="some-tag")
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    get_tag_mock = mocker.patch.object(
        ApiWorkflowClient, "get_tag_by_name", return_value=tag
    )
    export_mock = mocker.patch.object(ApiWorkflowClient, "export_filenames_by_tag_id")
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = dataset_id
    workflow_client.export_filenames_by_tag_name("some-tag")
    get_tag_mock.assert_called_once_with("some-tag")
    export_mock.assert_called_once_with(tag.id)
def test_export_label_box_data_rows_by_tag_id(mocker: MockerFixture) -> None:
    """The v3 export paginates the v3 endpoint and warns about deprecation."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    paginate_mock = mocker.patch.object(api_workflow_export.utils, "paginate_endpoint")
    tags_api = mocker.MagicMock()
    warn_mock = mocker.patch("warnings.warn")
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = utils.generate_id()
    workflow_client._tags_api = tags_api
    workflow_client.export_label_box_data_rows_by_tag_id(tag_id="tag_id")
    paginate_mock.assert_called_once()
    assert paginate_mock.call_args[0][0] == tags_api.export_tag_to_label_box_data_rows
    assert str(warn_mock.call_args[0][0]) == (
        "This method exports data in the deprecated Labelbox v3 format and "
        "will be removed in the future. Use export_label_box_v4_data_rows_by_tag_id "
        "to export data in the Labelbox v4 format instead."
    )
def test_export_label_box_data_rows_by_tag_name(mocker: MockerFixture) -> None:
    """The v3 name-based export resolves the tag and warns about deprecation."""
    dataset_id = utils.generate_id()
    tag = _get_tag(dataset_id=dataset_id, tag_name="some-tag")
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    get_tag_mock = mocker.patch.object(
        ApiWorkflowClient, "get_tag_by_name", return_value=tag
    )
    export_mock = mocker.patch.object(
        ApiWorkflowClient, "export_label_box_data_rows_by_tag_id"
    )
    warn_mock = mocker.patch("warnings.warn")
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = dataset_id
    workflow_client.export_label_box_data_rows_by_tag_name("some-tag")
    get_tag_mock.assert_called_once_with("some-tag")
    export_mock.assert_called_once_with(tag.id)
    assert str(warn_mock.call_args[0][0]) == (
        "This method exports data in the deprecated Labelbox v3 format and "
        "will be removed in the future. Use export_label_box_v4_data_rows_by_tag_name "
        "to export data in the Labelbox v4 format instead."
    )
def test_export_label_box_v4_data_rows_by_tag_id(mocker: MockerFixture) -> None:
    """The v4 export paginates the v4 endpoint without a deprecation warning."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    paginate_mock = mocker.patch.object(api_workflow_export.utils, "paginate_endpoint")
    tags_api = mocker.MagicMock()
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = utils.generate_id()
    workflow_client._tags_api = tags_api
    workflow_client.export_label_box_v4_data_rows_by_tag_id(tag_id="tag_id")
    paginate_mock.assert_called_once()
    assert (
        paginate_mock.call_args[0][0] == tags_api.export_tag_to_label_box_v4_data_rows
    )
def test_export_label_box_v4_data_rows_by_tag_name(mocker: MockerFixture) -> None:
    """The v4 name-based export resolves the tag and delegates by id."""
    dataset_id = utils.generate_id()
    tag = _get_tag(dataset_id=dataset_id, tag_name="some-tag")
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    get_tag_mock = mocker.patch.object(
        ApiWorkflowClient, "get_tag_by_name", return_value=tag
    )
    export_mock = mocker.patch.object(
        ApiWorkflowClient, "export_label_box_v4_data_rows_by_tag_id"
    )
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = dataset_id
    workflow_client.export_label_box_v4_data_rows_by_tag_name("some-tag")
    get_tag_mock.assert_called_once_with("some-tag")
    export_mock.assert_called_once_with(tag.id)
def test_export_label_studio_tasks_by_tag_name(mocker: MockerFixture) -> None:
    """The Label Studio export resolves the tag by name and delegates by id."""
    dataset_id = utils.generate_id()
    tag = _get_tag(dataset_id=dataset_id, tag_name="some-tag")
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    get_tag_mock = mocker.patch.object(
        ApiWorkflowClient, "get_tag_by_name", return_value=tag
    )
    export_mock = mocker.patch.object(
        ApiWorkflowClient, "export_label_studio_tasks_by_tag_id"
    )
    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = dataset_id
    workflow_client.export_label_studio_tasks_by_tag_name("some-tag")
    get_tag_mock.assert_called_once_with("some-tag")
    export_mock.assert_called_once_with(tag.id)
| 10,921 | 34.232258 | 88 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_predictions.py | from unittest.mock import MagicMock, call
from lightly.api import ApiWorkflowClient
from lightly.openapi_generated.swagger_client.api import PredictionsApi
from lightly.openapi_generated.swagger_client.models import (
PredictionSingletonClassification,
PredictionTaskSchema,
PredictionTaskSchemaCategory,
TaskType,
)
def test_create_or_update_prediction_task_schema() -> None:
    """The schema and version id are forwarded verbatim to the predictions API."""
    client = MagicMock(spec=ApiWorkflowClient)
    client.dataset_id = "some_dataset_id"
    client._predictions_api = MagicMock(spec_set=PredictionsApi)
    schema = PredictionTaskSchema.from_dict(
        {
            "name": "my-object-detection",
            "type": TaskType.OBJECT_DETECTION,
            "categories": [
                PredictionTaskSchemaCategory(id=0, name="dog").to_dict(),
                PredictionTaskSchemaCategory(id=1, name="cat").to_dict(),
            ],
        }
    )
    version_id = 1234
    ApiWorkflowClient.create_or_update_prediction_task_schema(
        self=client,
        schema=schema,
        prediction_version_id=version_id,
    )
    client._predictions_api.create_or_update_prediction_task_schema_by_dataset_id.assert_called_once_with(
        prediction_task_schema=schema,
        dataset_id=client.dataset_id,
        prediction_uuid_timestamp=version_id,
    )
def test_create_or_update_prediction() -> None:
    """Prediction singletons are forwarded together with sample and version ids."""
    client = MagicMock(spec=ApiWorkflowClient)
    client.dataset_id = "some_dataset_id"
    client._predictions_api = MagicMock(spec_set=PredictionsApi)
    singletons = [
        PredictionSingletonClassification(
            type="CLASSIFICATION",
            taskName="my-task",
            categoryId=1,
            score=0.9,
            probabilities=[0.1, 0.2, 0.3, 0.4],
        )
    ]
    sample_id = "some_sample_id"
    version_id = 1234
    ApiWorkflowClient.create_or_update_prediction(
        self=client,
        sample_id=sample_id,
        prediction_singletons=singletons,
        prediction_version_id=version_id,
    )
    client._predictions_api.create_or_update_prediction_by_sample_id.assert_called_once_with(
        prediction_singleton=singletons,
        dataset_id=client.dataset_id,
        sample_id=sample_id,
        prediction_uuid_timestamp=version_id,
    )
| 2,377 | 32.027778 | 113 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_selection.py | from typing import List
import pytest
from pytest_mock import MockerFixture
from lightly.active_learning.config.selection_config import SelectionConfig
from lightly.api import ApiWorkflowClient, api_workflow_selection
from lightly.openapi_generated.swagger_client.models import (
JobResultType,
JobState,
JobStatusData,
JobStatusDataResult,
SamplingCreateRequest,
SamplingMethod,
TagData,
)
from tests.api_workflow import utils
def _get_tags(dataset_id: str, tag_name: str = "just-a-tag") -> List[TagData]:
    """Return a single-element tag list for the given dataset."""
    tag = TagData(
        id=utils.generate_id(),
        dataset_id=dataset_id,
        prev_tag_id=None,
        bit_mask_data="0x1",
        name=tag_name,
        tot_size=4,
        created_at=1577836800,
        changes=[],
    )
    return [tag]
def _get_sampling_create_request(tag_name: str = "new-tag") -> SamplingCreateRequest:
    """Build a random-sampling request that creates ``tag_name``."""
    request = SamplingCreateRequest(
        new_tag_name=tag_name,
        method=SamplingMethod.RANDOM,
        config={},
    )
    return request
def test_selection__tag_exists(mocker: MockerFixture) -> None:
    """selection() refuses to create a tag whose name is already taken."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient,
        "get_all_tags",
        return_value=_get_tags(dataset_id=utils.generate_id(), tag_name="some-tag"),
    )
    workflow_client = ApiWorkflowClient()
    with pytest.raises(RuntimeError) as exc_info:
        workflow_client.selection(selection_config=SelectionConfig(name="some-tag"))
    assert str(exc_info.value) == "There already exists a tag with tag_name some-tag"
def test_selection__no_tags(mocker: MockerFixture) -> None:
    """selection() fails when the dataset has no initial-tag at all."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "get_all_tags", return_value=[])
    workflow_client = ApiWorkflowClient()
    with pytest.raises(RuntimeError) as exc_info:
        workflow_client.selection(selection_config=SelectionConfig(name="some-tag"))
    assert str(exc_info.value) == "There exists no initial-tag for this dataset."
def test_selection(mocker: MockerFixture) -> None:
    """A finished selection job leads to a single lookup of the new tag.

    The sampling job reports FINISHED immediately, so the status endpoint is
    polled exactly once and the resulting tag id is fetched from the tags API.
    """
    tag_name = "some-tag"
    dataset_id = utils.generate_id()
    # Avoid real waiting between job-status polls.
    mocker.patch("time.sleep")
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
    )
    mocker.patch.object(
        ApiWorkflowClient,
        "_create_selection_create_request",
        return_value=_get_sampling_create_request(),
    )
    mocked_selection_api = mocker.MagicMock()
    mocked_sampling_response = mocker.MagicMock()
    mocked_sampling_response.job_id = utils.generate_id()
    mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
    mocked_jobs_api = mocker.MagicMock()
    # The job finishes on the first poll and carries the new tag id.
    mocked_get_job_status = mocker.MagicMock(
        return_value=JobStatusData(
            id=utils.generate_id(),
            wait_time_till_next_poll=1,
            created_at=0,
            status=JobState.FINISHED,
            result=JobStatusDataResult(type=JobResultType.SAMPLING, data="new-tag-id"),
        )
    )
    mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
    mocked_tags_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._selection_api = mocked_selection_api
    client._jobs_api = mocked_jobs_api
    client._tags_api = mocked_tags_api
    client._dataset_id = dataset_id
    client.embedding_id = "embedding-id"
    client.selection(selection_config=SelectionConfig(name=tag_name))
    mocked_get_job_status.assert_called_once()
    mocked_tags_api.get_tag_by_tag_id.assert_called_once_with(
        dataset_id=dataset_id, tag_id="new-tag-id"
    )
def test_selection__job_failed(mocker: MockerFixture) -> None:
    """A selection job that reports FAILED surfaces a RuntimeError with the error."""
    dataset_id = utils.generate_id()
    job_id = "some-job-id"
    # Patch out sleeping so polling for the job status is instantaneous.
    mocker.patch("time.sleep")
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
    )
    mocker.patch.object(
        ApiWorkflowClient,
        "_create_selection_create_request",
        return_value=_get_sampling_create_request(),
    )
    mocked_selection_api = mocker.MagicMock()
    mocked_sampling_response = mocker.MagicMock()
    mocked_sampling_response.job_id = job_id
    mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
    mocked_jobs_api = mocker.MagicMock()
    # The first status poll reports FAILED together with the error string.
    mocked_get_job_status = mocker.MagicMock(
        return_value=JobStatusData(
            id=utils.generate_id(),
            wait_time_till_next_poll=1,
            created_at=0,
            status=JobState.FAILED,
            error="bad job",
        )
    )
    mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
    client = ApiWorkflowClient()
    client._selection_api = mocked_selection_api
    client._jobs_api = mocked_jobs_api
    client._dataset_id = dataset_id
    client.embedding_id = "embedding-id"
    with pytest.raises(RuntimeError) as exception:
        client.selection(selection_config=SelectionConfig(name="some-tag"))
    assert str(exception.value) == (
        "Selection job with job_id some-job-id failed with error bad job"
    )
def test_selection__too_many_errors(mocker: MockerFixture) -> None:
    """Repeated polling errors eventually abort selection and print a message.

    Every status poll raises; after the client's retry budget is exhausted the
    last exception propagates and a human-readable message is printed.
    """
    dataset_id = utils.generate_id()
    job_id = "some-job-id"
    # Patch out sleeping so the retry loop runs instantaneously.
    mocker.patch("time.sleep")
    mocked_print = mocker.patch("builtins.print")
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient, "get_all_tags", return_value=_get_tags(dataset_id=dataset_id)
    )
    mocker.patch.object(
        ApiWorkflowClient,
        "_create_selection_create_request",
        return_value=_get_sampling_create_request(),
    )
    mocked_selection_api = mocker.MagicMock()
    mocked_sampling_response = mocker.MagicMock()
    mocked_sampling_response.job_id = job_id
    mocked_selection_api.trigger_sampling_by_id.return_value = mocked_sampling_response
    mocked_jobs_api = mocker.MagicMock()
    # 20 consecutive failures — more than the client retries.
    mocked_get_job_status = mocker.MagicMock(
        side_effect=[Exception("surprise!") for _ in range(20)]
    )
    mocked_jobs_api.get_job_status_by_id = mocked_get_job_status
    client = ApiWorkflowClient()
    client._selection_api = mocked_selection_api
    client._jobs_api = mocked_jobs_api
    client._dataset_id = dataset_id
    client.embedding_id = "embedding-id"
    with pytest.raises(Exception) as exception:
        client.selection(selection_config=SelectionConfig(name="some-tag"))
    assert str(exception.value) == "surprise!"
    mocked_print.assert_called_once_with(
        "Selection job with job_id some-job-id could not be started "
        "because of error: surprise!"
    )
def test_upload_scores(mocker: MockerFixture) -> None:
    """Active-learning scores are uploaded for the given query tag id."""
    dataset_id = utils.generate_id()
    tags = _get_tags(dataset_id=dataset_id, tag_name="initial-tag")
    tag_id = tags[0].id
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(
        ApiWorkflowClient,
        "get_all_tags",
        return_value=tags,
    )
    # Score parsing is stubbed out; only the API call routing is under test.
    mocker.patch.object(
        api_workflow_selection, "_parse_active_learning_scores", return_value=[1]
    )
    mocked_api = mocker.MagicMock()
    mocked_create_score = mocked_api.create_or_update_active_learning_score_by_tag_id
    client = ApiWorkflowClient()
    client._scores_api = mocked_api
    client._dataset_id = dataset_id
    mocked_create_score.reset_mock()
    client.upload_scores(al_scores={"score_type": [1, 2, 3]}, query_tag_id=tag_id)
    mocked_create_score.assert_called_once()
    # The score must be attached to the query tag that was passed in.
    kwargs = mocked_create_score.call_args[1]
    assert kwargs.get("tag_id") == tag_id
| 7,897 | 33.33913 | 88 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_tags.py | from typing import List, Optional
import pytest
from pytest_mock import MockerFixture
from lightly.api import ApiWorkflowClient
from lightly.api.api_workflow_tags import TagDoesNotExistError
from lightly.openapi_generated.swagger_client.models import TagCreator, TagData
from tests.api_workflow import utils
def _get_tags(
    dataset_id: str, tag_name: str = "just-a-tag", prev_tag_id: Optional[str] = None
) -> List[TagData]:
    """Return a single-element list with one fake TagData for testing.

    The bitmask "0x5" (binary 101) marks samples at indices 0 and 2 as part of
    the tag — the filename tests below rely on exactly this selection.
    """
    return [
        TagData(
            id=utils.generate_id(),
            dataset_id=dataset_id,
            prev_tag_id=prev_tag_id,
            bit_mask_data="0x5",
            name=tag_name,
            tot_size=4,
            created_at=1577836800,
            changes=[],
        )
    ]
def test_create_tag_from_filenames(mocker: MockerFixture) -> None:
    """Creating a tag from filenames builds the correct bitmask request.

    With server files ["file0", "file1", "file2"], selecting "file2" yields
    bitmask 0x4 (binary 100, index 2 set).
    """
    dataset_id = utils.generate_id()
    tags = _get_tags(dataset_id=dataset_id, tag_name="initial-tag")
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "get_all_tags", return_value=tags)
    mocked_get_filenames = mocker.patch.object(
        ApiWorkflowClient, "get_filenames", return_value=[f"file{i}" for i in range(3)]
    )
    mocked_api = mocker.MagicMock()
    client = ApiWorkflowClient()
    client._tags_api = mocked_api
    client._dataset_id = dataset_id
    client._creator = TagCreator.UNKNOWN
    client.create_tag_from_filenames(fnames_new_tag=["file2"], new_tag_name="some-tag")
    mocked_get_filenames.assert_called_once()
    mocked_api.create_tag_by_dataset_id.assert_called_once()
    kwargs = mocked_api.create_tag_by_dataset_id.call_args[1]
    # initial-tag is used as prev_tag_id when parent_tag_id is not given
    assert kwargs["tag_create_request"].prev_tag_id == tags[0].id
    assert kwargs["tag_create_request"].bit_mask_data == "0x4"
def test_create_tag_from_filenames__tag_exists(mocker: MockerFixture) -> None:
    """Creating a tag whose name is already taken raises a RuntimeError."""
    existing_name = "some-tag"
    existing_tags = _get_tags(dataset_id=utils.generate_id(), tag_name=existing_name)
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "get_all_tags", return_value=existing_tags)

    workflow_client = ApiWorkflowClient()
    with pytest.raises(RuntimeError) as exc_info:
        workflow_client.create_tag_from_filenames(
            fnames_new_tag=["file"], new_tag_name=existing_name
        )
    assert str(exc_info.value) == (
        "There already exists a tag with tag_name some-tag"
    )
def test_create_tag_from_filenames__no_tags(mocker: MockerFixture) -> None:
    """Creating a tag fails when the dataset has no initial-tag."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "get_all_tags", return_value=[])

    workflow_client = ApiWorkflowClient()
    with pytest.raises(RuntimeError) as exc_info:
        workflow_client.create_tag_from_filenames(
            fnames_new_tag=["file"], new_tag_name="some-tag"
        )
    assert str(exc_info.value) == "There exists no initial-tag for this dataset."
def test_create_tag_from_filenames__file_not_found(mocker: MockerFixture) -> None:
    """An unknown filename in the new tag produces a descriptive RuntimeError."""
    tags = _get_tags(dataset_id=utils.generate_id(), tag_name="initial-tag")
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "get_all_tags", return_value=tags)
    mocked_get_filenames = mocker.patch.object(
        ApiWorkflowClient, "get_filenames", return_value=[f"file{i}" for i in range(3)]
    )
    client = ApiWorkflowClient()
    with pytest.raises(RuntimeError) as exception:
        client.create_tag_from_filenames(
            fnames_new_tag=["some-file"], new_tag_name="some-tag"
        )
    # "occured" [sic] deliberately matches the message emitted by the client.
    assert str(exception.value) == (
        "An error occured when creating the new subset! "
        "Out of the 1 filenames you provided "
        "to create a new tag, only 0 have been found on the server. "
        "Make sure you use the correct filenames. "
        "Valid filename example from the dataset: file0"
    )
    mocked_get_filenames.assert_called_once()
def test_get_filenames_in_tag(mocker: MockerFixture) -> None:
    """Bitmask 0x5 selects the first and third of the server filenames."""
    tag_data = _get_tags(dataset_id=utils.generate_id())[0]
    server_filenames = ["file0", "file1", "file2"]
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    get_filenames_mock = mocker.patch.object(
        ApiWorkflowClient, "get_filenames", return_value=server_filenames
    )

    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    filenames = workflow_client.get_filenames_in_tag(tag_data=tag_data)

    assert filenames == ["file0", "file2"]
    get_filenames_mock.assert_called_once()
def test_get_filenames_in_tag__filenames_given(mocker: MockerFixture) -> None:
    """Passing filenames explicitly skips the server round trip."""
    tag_data = _get_tags(dataset_id=utils.generate_id())[0]
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    get_filenames_mock = mocker.patch.object(ApiWorkflowClient, "get_filenames")

    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    filenames = workflow_client.get_filenames_in_tag(
        tag_data=tag_data, filenames_on_server=[f"new-file-{i}" for i in range(3)]
    )

    # Bitmask 0x5 selects indices 0 and 2 of the provided filenames.
    assert filenames == ["new-file-0", "new-file-2"]
    get_filenames_mock.assert_not_called()
def test_get_filenames_in_tag__exclude_parent_tag(mocker: MockerFixture) -> None:
    """exclude_parent_tag routes through the tag-arithmetics endpoint.

    The mocked endpoint returns bitmask 0x2 (binary 010), so only the filename
    at index 1 survives; the parent (prev) tag must be the second operand.
    """
    prev_tag_id = utils.generate_id()
    dataset_id = utils.generate_id()
    tag = _get_tags(dataset_id=dataset_id, prev_tag_id=prev_tag_id)[0]
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocked_get_filenames = mocker.patch.object(
        ApiWorkflowClient, "get_filenames", return_value=[f"file{i}" for i in range(3)]
    )
    mocked_response = mocker.MagicMock()
    mocked_response.bit_mask_data = "0x2"
    mocked_tag_arithmetics = mocker.MagicMock(return_value=mocked_response)
    mocked_api = mocker.MagicMock()
    mocked_api.perform_tag_arithmetics_bitmask = mocked_tag_arithmetics
    client = ApiWorkflowClient()
    client._dataset_id = dataset_id
    client._tags_api = mocked_api
    result = client.get_filenames_in_tag(tag_data=tag, exclude_parent_tag=True)
    assert result == ["file1"]
    mocked_get_filenames.assert_called_once()
    mocked_tag_arithmetics.assert_called_once()
    kwargs = mocked_tag_arithmetics.call_args[1]
    assert kwargs["dataset_id"] == dataset_id
    assert kwargs["tag_arithmetics_request"].tag_id2 == prev_tag_id
def test_get_all_tags(mocker: MockerFixture) -> None:
    """get_all_tags delegates to the tags API with the client's dataset id."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    tags_api_mock = mocker.MagicMock()

    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    workflow_client._tags_api = tags_api_mock

    workflow_client.get_all_tags()
    tags_api_mock.get_tags_by_dataset_id.assert_called_once_with("dataset-id")
def test_get_tag_by_id(mocker: MockerFixture) -> None:
    """get_tag_by_id forwards both dataset id and tag id to the tags API."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    tags_api_mock = mocker.MagicMock()

    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    workflow_client._tags_api = tags_api_mock

    workflow_client.get_tag_by_id("tag-id")
    tags_api_mock.get_tag_by_tag_id.assert_called_once_with(
        dataset_id="dataset-id", tag_id="tag-id"
    )
def test_get_tag_name(mocker: MockerFixture) -> None:
    """get_tag_by_name resolves the name to a tag id and fetches by id."""
    tag_name = "some-tag"
    known_tags = _get_tags(dataset_id=utils.generate_id(), tag_name=tag_name)
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "get_all_tags", return_value=known_tags)
    get_tag_by_id_mock = mocker.patch.object(ApiWorkflowClient, "get_tag_by_id")

    workflow_client = ApiWorkflowClient()
    workflow_client.get_tag_by_name(tag_name=tag_name)
    get_tag_by_id_mock.assert_called_once_with(known_tags[0].id)
def test_get_tag_name__nonexisting(mocker: MockerFixture) -> None:
    """Looking up an unknown tag name raises TagDoesNotExistError."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    mocker.patch.object(ApiWorkflowClient, "get_all_tags", return_value=[])

    workflow_client = ApiWorkflowClient()
    with pytest.raises(TagDoesNotExistError) as exc_info:
        workflow_client.get_tag_by_name(tag_name="some-tag")
    assert str(exc_info.value) == "Your tag_name does not exist: some-tag"
def test_delete_tag_by_id(mocker: MockerFixture) -> None:
    """delete_tag_by_id forwards dataset id and tag id to the tags API."""
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    tags_api_mock = mocker.MagicMock()

    workflow_client = ApiWorkflowClient()
    workflow_client._dataset_id = "dataset-id"
    workflow_client._tags_api = tags_api_mock

    workflow_client.delete_tag_by_id("tag-id")
    tags_api_mock.delete_tag_by_tag_id.assert_called_once_with(
        dataset_id="dataset-id", tag_id="tag-id"
    )
| 8,539 | 38.72093 | 88 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_upload_custom_metadata.py | from pytest_mock import MockerFixture
from lightly.api import ApiWorkflowClient, api_workflow_upload_metadata
from lightly.openapi_generated.swagger_client.models import (
SampleDataModes,
SamplePartialMode,
SampleUpdateRequest,
)
from lightly.utils.io import COCO_ANNOTATION_KEYS
from tests.api_workflow import utils
def test_index_custom_metadata_by_filename(mocker: MockerFixture) -> None:
    """Metadata is joined to images by image_id.

    file0 receives its annotation, file1 has none (maps to None), and the
    annotation for the unknown image-id2 is dropped from the result.
    """
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    custom_metadata = {}
    custom_metadata[COCO_ANNOTATION_KEYS.images] = [
        {
            COCO_ANNOTATION_KEYS.images_filename: "file0",
            COCO_ANNOTATION_KEYS.images_id: "image-id0",
        },
        {
            COCO_ANNOTATION_KEYS.images_filename: "file1",
            COCO_ANNOTATION_KEYS.images_id: "image-id1",
        },
    ]
    custom_metadata[COCO_ANNOTATION_KEYS.custom_metadata] = [
        {COCO_ANNOTATION_KEYS.custom_metadata_image_id: "image-id2"},
        {COCO_ANNOTATION_KEYS.custom_metadata_image_id: "image-id0"},
    ]
    client = ApiWorkflowClient()
    result = client.index_custom_metadata_by_filename(custom_metadata=custom_metadata)
    assert result == {
        "file0": {COCO_ANNOTATION_KEYS.custom_metadata_image_id: "image-id0"},
        "file1": None,
    }
def test_upload_custom_metadata(mocker: MockerFixture) -> None:
    """Only metadata whose image resolves to a server sample is uploaded.

    The mocked server knows a single sample `file1`. Of the three annotations,
    one references an unknown image_id and one references an image whose file
    is not on the server — both are skipped with a printed warning. Only the
    annotation for `file1` is sent via update_sample_by_id.
    """
    mocker.patch("tqdm.tqdm")
    mocker.patch.object(ApiWorkflowClient, "__init__", return_value=None)
    # retry should be called twice: once for get_samples_partial_by_dataset_id
    # and once for update_sample_by_id. get_samples_partial_by_dataset_id returns
    # only one valid sample file `file1`
    dummy_sample = SampleDataModes(id=utils.generate_id(), file_name="file1")
    mocked_paginate_endpoint = mocker.patch.object(
        api_workflow_upload_metadata,
        "paginate_endpoint",
        side_effect=[
            [dummy_sample],
            None,
        ],
    )
    mocked_retry = mocker.patch.object(
        api_workflow_upload_metadata,
        "retry",
        side_effect=[
            [dummy_sample],
            None,
        ],
    )
    mocked_print_warning = mocker.patch.object(
        api_workflow_upload_metadata.hipify, "print_as_warning"
    )
    mocked_executor = mocker.patch.object(
        api_workflow_upload_metadata, "ThreadPoolExecutor"
    )
    # Replace the thread pool's map with the synchronous built-in so the test
    # is deterministic and runs on the calling thread.
    mocked_executor.return_value.__enter__.return_value.map = (
        lambda fn, iterables, **_: map(fn, iterables)
    )
    mocked_samples_api = mocker.MagicMock()
    custom_metadata = {}
    custom_metadata[COCO_ANNOTATION_KEYS.images] = [
        {
            COCO_ANNOTATION_KEYS.images_filename: "file0",
            COCO_ANNOTATION_KEYS.images_id: "image-id0",
        },
        {
            COCO_ANNOTATION_KEYS.images_filename: "file1",
            COCO_ANNOTATION_KEYS.images_id: "image-id1",
        },
    ]
    custom_metadata[COCO_ANNOTATION_KEYS.custom_metadata] = [
        {COCO_ANNOTATION_KEYS.custom_metadata_image_id: "image-id2"},
        {COCO_ANNOTATION_KEYS.custom_metadata_image_id: "image-id1"},
        {COCO_ANNOTATION_KEYS.custom_metadata_image_id: "image-id0"},
    ]
    client = ApiWorkflowClient()
    client._dataset_id = "dataset-id"
    client._samples_api = mocked_samples_api
    client.upload_custom_metadata(custom_metadata=custom_metadata)
    # Only `file1` is a valid sample
    assert mocked_print_warning.call_count == 2
    warning_text = [
        call_args[0][0] for call_args in mocked_print_warning.call_args_list
    ]
    assert warning_text == [
        (
            "No image found for custom metadata annotation with image_id image-id2. "
            "This custom metadata annotation is skipped. "
        ),
        (
            "You tried to upload custom metadata for a sample with filename {file0}, "
            "but a sample with this filename does not exist on the server. "
            "This custom metadata annotation is skipped. "
        ),
    ]
    # First call: get_samples_partial_by_dataset_id
    mocked_paginate_endpoint.assert_called_once_with(
        mocked_samples_api.get_samples_partial_by_dataset_id,
        dataset_id="dataset-id",
        mode=SamplePartialMode.FILENAMES,
        page_size=25000,
    )
    # Second call: update_sample_by_id with the only valid sample
    mocked_retry.assert_called_once_with(
        mocked_samples_api.update_sample_by_id,
        dataset_id="dataset-id",
        sample_id=dummy_sample.id,
        sample_update_request=SampleUpdateRequest(
            custom_meta_data={
                COCO_ANNOTATION_KEYS.custom_metadata_image_id: "image-id1"
            }
        ),
    )
| 4,671 | 34.938462 | 86 | py |
lightly | lightly-master/tests/api_workflow/test_api_workflow_upload_embeddings.py | import os
import tempfile
import numpy as np
from lightly.utils import io as io_utils
from lightly.utils.io import INVALID_FILENAME_CHARACTERS
from tests.api_workflow.mocked_api_workflow_client import (
N_FILES_ON_SERVER,
MockedApiWorkflowSetup,
)
class TestApiWorkflowUploadEmbeddings(MockedApiWorkflowSetup):
    """Tests for uploading and appending embedding CSV files."""

    def create_fake_embeddings(
        self,
        n_data,
        n_data_start: int = 0,
        n_dims: int = 32,
        special_name_first_sample: bool = False,
        special_char_in_first_filename: str = None,
    ):
        """Write a random embeddings CSV with n_data rows to a temp folder.

        Filenames run from img_{n_data_start} to img_{n_data_start + n_data - 1};
        the first filename can be replaced ("bliblablub") or prefixed with a
        special character to trigger filename validation errors.
        """
        # create fake embeddings
        self.folder_path = tempfile.mkdtemp()
        self.path_to_embeddings = os.path.join(self.folder_path, "embeddings.csv")
        self.sample_names = [
            f"img_{i}.jpg" for i in range(n_data_start, n_data_start + n_data)
        ]
        if special_name_first_sample:
            self.sample_names[0] = "bliblablub"
        if special_char_in_first_filename:
            self.sample_names[0] = (
                f"_{special_char_in_first_filename}" f"{self.sample_names[0]}"
            )
        labels = [0] * len(self.sample_names)
        io_utils.save_embeddings(
            self.path_to_embeddings,
            np.random.randn(n_data, n_dims),
            labels,
            self.sample_names,
        )

    def t_ester_upload_embedding(
        self,
        n_data,
        n_dims: int = 32,
        special_name_first_sample: bool = False,
        special_char_in_first_filename: str = None,
        name: str = "embedding_xyz",
    ):
        """Create fake embeddings and upload them through the mocked client."""
        self.create_fake_embeddings(
            n_data,
            n_dims=n_dims,
            special_name_first_sample=special_name_first_sample,
            special_char_in_first_filename=special_char_in_first_filename,
        )
        # perform the workflow to upload the embeddings
        self.api_workflow_client.upload_embeddings(
            path_to_embeddings_csv=self.path_to_embeddings, name=name
        )
        self.api_workflow_client.n_dims_embeddings_on_server = n_dims

    def test_upload_success(self):
        """Upload succeeds and leaves no intermediate sorted CSV behind."""
        n_data = len(self.api_workflow_client._mappings_api.sample_names)
        self.t_ester_upload_embedding(n_data=n_data)
        filepath_embeddings_sorted = os.path.join(
            self.folder_path, "embeddings_sorted.csv"
        )
        self.assertFalse(os.path.isfile(filepath_embeddings_sorted))

    def test_upload_wrong_length(self):
        """A row count not matching the server's sample count is rejected."""
        n_data = 42 + len(self.api_workflow_client._mappings_api.sample_names)
        with self.assertRaises(ValueError):
            self.t_ester_upload_embedding(n_data=n_data)

    def test_upload_wrong_filenames(self):
        """A filename unknown to the server is rejected."""
        n_data = len(self.api_workflow_client._mappings_api.sample_names)
        with self.assertRaises(ValueError):
            self.t_ester_upload_embedding(n_data=n_data, special_name_first_sample=True)

    def test_upload_comma_filenames(self):
        """Each invalid filename character causes the upload to be rejected."""
        n_data = len(self.api_workflow_client._mappings_api.sample_names)
        for invalid_char in INVALID_FILENAME_CHARACTERS:
            with self.subTest(msg=f"invalid_char: {invalid_char}"):
                with self.assertRaises(ValueError):
                    self.t_ester_upload_embedding(
                        n_data=n_data, special_char_in_first_filename=invalid_char
                    )

    def test_set_embedding_id_default(self):
        """set_embedding_id_to_latest picks the first embedding returned."""
        self.api_workflow_client.set_embedding_id_to_latest()
        embeddings = (
            self.api_workflow_client._embeddings_api.get_embeddings_by_dataset_id(
                dataset_id=self.api_workflow_client.dataset_id
            )
        )
        self.assertEqual(self.api_workflow_client.embedding_id, embeddings[0].id)

    def test_set_embedding_id_no_embeddings(self):
        """set_embedding_id_to_latest raises when no embeddings exist."""
        self.api_workflow_client.embeddings_api.embeddings = [] if False else None  # noqa  -- see original below
| 7,286 | 35.989848 | 88 | py |
lightly | lightly-master/tests/api_workflow/utils.py | import random
_CHARACTER_SET = "abcdef0123456789"


def generate_id(length: int = 24) -> str:
    """Return a random string of `length` lowercase-hex characters.

    Used throughout the tests as a stand-in for server-side object ids
    (dataset ids, tag ids, job ids).

    Args:
        length: Number of characters in the generated id (default 24).

    Returns:
        A string drawn uniformly at random from "abcdef0123456789".
    """
    # random.choices draws `length` characters with replacement in a single
    # call, replacing the original loop with an unused index variable.
    return "".join(random.choices(_CHARACTER_SET, k=length))
| 170 | 20.375 | 74 | py |
lightly | lightly-master/tests/cli/test_cli_crop.py | import os
import random
import re
import sys
import tempfile
import torchvision
import yaml
from hydra.experimental import compose, initialize
import lightly
from lightly.data import LightlyDataset
from lightly.utils.bounding_box import BoundingBox
from lightly.utils.cropping.crop_image_by_bounding_boxes import (
crop_dataset_by_bounding_boxes_and_save,
)
from lightly.utils.cropping.read_yolo_label_file import read_yolo_label_file
from tests.api_workflow.mocked_api_workflow_client import (
MockedApiWorkflowClient,
MockedApiWorkflowSetup,
)
class TestCLICrop(MockedApiWorkflowSetup):
    """Tests for the ``lightly-crop`` CLI entry point."""

    def setUp(self):
        """Create a fake image dataset plus YOLO labels and compose the config."""
        MockedApiWorkflowSetup.setUp(self)
        self.create_fake_dataset()
        self.create_fake_yolo_labels()
        with initialize(config_path="../../lightly/cli/config", job_name="test_app"):
            self.cfg = compose(
                config_name="config",
                overrides=[
                    f"input_dir={self.folder_path}",
                    f"label_dir={self.folder_path_labels}",
                    f"output_dir={tempfile.mkdtemp()}",
                    f"label_names_file={self.label_names_file}",
                ],
            )

    def create_fake_dataset(self):
        """Write one fake JPG per filename known to the mocked API server."""
        n_data = len(self.api_workflow_client.get_filenames())
        self.dataset = torchvision.datasets.FakeData(
            size=n_data, image_size=(3, 32, 32)
        )
        self.folder_path = tempfile.mkdtemp()
        sample_names = [f"img_{i}.jpg" for i in range(n_data)]
        self.sample_names = sample_names
        for sample_idx in range(n_data):
            data = self.dataset[sample_idx]
            path = os.path.join(self.folder_path, sample_names[sample_idx])
            data[0].save(path)

    def create_fake_yolo_labels(
        self, no_classes: int = 10, objects_per_image: int = 13
    ):
        """Write one YOLO label file per image plus a yaml with class names.

        Args:
            no_classes: Number of distinct class ids to draw from.
            objects_per_image: Number of bounding-box lines per label file.
        """
        random.seed(42)  # deterministic boxes across test runs
        n_data = len(self.api_workflow_client.get_filenames())
        self.folder_path_labels = tempfile.mkdtemp()
        label_names = [f"img_{i}.txt" for i in range(n_data)]
        self.label_names = label_names
        for filename_label in label_names:
            path = os.path.join(self.folder_path_labels, filename_label)
            with open(path, "a") as the_file:
                for i in range(objects_per_image):
                    class_id = random.randint(0, no_classes - 1)
                    x = random.uniform(0.1, 0.9)
                    y = random.uniform(0.1, 0.9)
                    w = random.uniform(0.1, 1.0)
                    h = random.uniform(0.1, 1.0)
                    line = f"{class_id} {x} {y} {w} {h}\n"
                    the_file.write(line)
        yaml_dict = {"names": [f"class{i}" for i in range(no_classes)]}
        # tempfile.mktemp is deprecated and racy (the returned name could be
        # claimed by another process before the file is created); mkstemp
        # creates the file atomically. The OS-level handle is closed right
        # away because the file is re-opened for writing below.
        file_descriptor, self.label_names_file = tempfile.mkstemp(
            suffix=".yaml", prefix="data", dir=self.folder_path_labels
        )
        os.close(file_descriptor)
        with open(self.label_names_file, "w") as file:
            yaml.dump(yaml_dict, file)

    def parse_cli_string(self, cli_words: str):
        """Apply 'key=value' overrides from a CLI string onto self.cfg."""
        cli_words = cli_words.replace("lightly-crop ", "")
        cli_words = re.split("=| ", cli_words)
        assert len(cli_words) % 2 == 0
        dict_keys = cli_words[0::2]
        dict_values = cli_words[1::2]
        for key, value in zip(dict_keys, dict_values):
            value = value.strip('"')
            value = value.strip("'")
            self.cfg[key] = value

    def test_parse_cli_string(self):
        """Overrides from the CLI string end up in the config."""
        cli_string = "lightly-crop label_dir=/blub"
        self.parse_cli_string(cli_string)
        self.assertEqual(self.cfg["label_dir"], "/blub")

    def test_read_yolo(self):
        """All generated .txt files parse as YOLO label files."""
        for f in os.listdir(self.cfg.label_dir):
            if f.endswith(".txt"):
                filepath = os.path.join(self.cfg.label_dir, f)
                read_yolo_label_file(filepath, 0.1)

    def test_crop_dataset_by_bounding_boxes_and_save(self):
        """Cropping succeeds on valid input and rejects malformed lists."""
        dataset = LightlyDataset(self.cfg.input_dir)
        output_dir = self.cfg.output_dir
        no_files = len(dataset.get_filenames())
        bounding_boxes_list_list = [[BoundingBox(0, 0, 1, 1)]] * no_files
        class_indices_list_list = [[1]] * no_files
        class_names = ["class_0", "class_1"]
        with self.subTest("all_correct"):
            crop_dataset_by_bounding_boxes_and_save(
                dataset,
                output_dir,
                bounding_boxes_list_list,
                class_indices_list_list,
                class_names,
            )
        with self.subTest("wrong length of bounding_boxes_list_list"):
            with self.assertRaises(ValueError):
                crop_dataset_by_bounding_boxes_and_save(
                    dataset,
                    output_dir,
                    bounding_boxes_list_list[:-1],
                    class_indices_list_list,
                    class_names,
                )
        with self.subTest("wrong internal length of class_indices_list_list"):
            with self.assertWarns(UserWarning):
                # The inner list is shared between all entries (built with `*`),
                # so this in-place extension doubles every entry's length.
                class_indices_list_list[0] *= 2
                crop_dataset_by_bounding_boxes_and_save(
                    dataset,
                    output_dir,
                    bounding_boxes_list_list,
                    class_indices_list_list,
                    class_names,
                )

    def test_crop_with_class_names(self):
        """Crop CLI runs end to end when a label names file is configured."""
        cli_string = "lightly-crop crop_padding=0.1"
        self.parse_cli_string(cli_string)
        lightly.cli.crop_cli(self.cfg)

    def test_crop_without_class_names(self):
        """Crop CLI also runs without a label names file."""
        cli_string = "lightly-crop crop_padding=0.1"
        self.parse_cli_string(cli_string)
        self.cfg["label_names_file"] = ""
        lightly.cli.crop_cli(self.cfg)
| 5,667 | 37.040268 | 85 | py |
lightly | lightly-master/tests/cli/test_cli_download.py | import os
import sys
import tempfile
import warnings

import pytest
import torchvision
from hydra.experimental import compose, initialize

import lightly
from tests.api_workflow.mocked_api_workflow_client import (
MockedApiWorkflowClient,
MockedApiWorkflowSetup,
)
_DATASET_ID = "b2a40959eacd1c9a142ba57b"
class TestCLIDownload(MockedApiWorkflowSetup):
    """Tests for the ``lightly-download`` CLI entry point."""

    @classmethod
    def setUpClass(cls) -> None:
        # Route all API traffic of the download CLI through the mocked client.
        sys.modules[
            "lightly.cli.download_cli"
        ].ApiWorkflowClient = MockedApiWorkflowClient

    def setUp(self):
        """Compose the default CLI config."""
        with initialize(config_path="../../lightly/cli/config", job_name="test_app"):
            self.cfg = compose(config_name="config")

    def create_fake_dataset(self, n_data: int = 5):
        """Write fake JPGs to a temp input dir and create an empty output dir."""
        self.dataset = torchvision.datasets.FakeData(
            size=n_data, image_size=(3, 32, 32)
        )
        self.input_dir = tempfile.mkdtemp()
        sample_names = [f"img_{i}.jpg" for i in range(n_data)]
        self.sample_names = sample_names
        for sample_idx in range(n_data):
            data = self.dataset[sample_idx]
            path = os.path.join(self.input_dir, sample_names[sample_idx])
            data[0].save(path)
        self.output_dir = tempfile.mkdtemp()

    def parse_cli_string(self, cli_words: str):
        """Re-compose self.cfg from the overrides in a CLI string."""
        cli_words = cli_words.replace("lightly-download ", "")
        overrides = cli_words.split(" ")
        with initialize(config_path="../../lightly/cli/config/"):
            self.cfg = compose(
                config_name="config",
                overrides=overrides,
            )

    def test_parse_cli_string(self):
        """Token and dataset id overrides are parsed into the config."""
        cli_string = "lightly-download token='123' dataset_id='XYZ'"
        self.parse_cli_string(cli_string)
        assert self.cfg["token"] == "123"
        assert self.cfg["dataset_id"] == "XYZ"

    def test_download_base(self):
        """Download runs with only token and dataset id."""
        cli_string = f"lightly-download token='123' dataset_id='{_DATASET_ID}'"
        self.parse_cli_string(cli_string)
        lightly.cli.download_cli(self.cfg)

    def test_download_tag_name(self):
        """Download runs for an existing tag name."""
        cli_string = f"lightly-download token='123' dataset_id='{_DATASET_ID}' tag_name='selected_tag_xyz'"
        self.parse_cli_string(cli_string)
        lightly.cli.download_cli(self.cfg)

    def test_download_tag_name_nonexisting(self):
        """A tag name unknown to the server raises a ValueError."""
        cli_string = f"lightly-download token='123' dataset_id='{_DATASET_ID}' tag_name='nonexisting_xyz'"
        self.parse_cli_string(cli_string)
        with self.assertRaises(ValueError):
            lightly.cli.download_cli(self.cfg)

    def test_download_tag_name_exclude_parent(self):
        """Download runs with the exclude_parent_tag flag."""
        cli_string = f"lightly-download token='123' dataset_id='{_DATASET_ID}' tag_name='selected_tag_xyz' exclude_parent_tag=True"
        self.parse_cli_string(cli_string)
        lightly.cli.download_cli(self.cfg)

    def test_download_no_tag_name(self):
        # defaults to initial-tag
        cli_string = f"lightly-download token='123' dataset_id='{_DATASET_ID}'"
        self.parse_cli_string(cli_string)
        lightly.cli.download_cli(self.cfg)

    def test_download_no_token(self):
        """A missing token produces a user warning instead of a crash."""
        cli_string = (
            f"lightly-download dataset_id='{_DATASET_ID}' tag_name='selected_tag_xyz'"
        )
        self.parse_cli_string(cli_string)
        with self.assertWarns(UserWarning):
            lightly.cli.download_cli(self.cfg)

    def test_download_no_dataset_id(self):
        """A missing dataset id produces a user warning instead of a crash."""
        cli_string = "lightly-download token='123' tag_name='selected_tag_xyz'"
        self.parse_cli_string(cli_string)
        with self.assertWarns(UserWarning):
            lightly.cli.download_cli(self.cfg)

    def test_download_copy_from_input_to_output_dir(self):
        """Download copies the tagged images from input_dir into output_dir."""
        self.create_fake_dataset(n_data=100)
        cli_string = (
            f"lightly-download token='123' dataset_id='{_DATASET_ID}' tag_name='selected_tag_xyz' "
            f"input_dir={self.input_dir} output_dir={self.output_dir}"
        )
        self.parse_cli_string(cli_string)
        lightly.cli.download_cli(self.cfg)

    def test_download_from_tag_with_integer_name(self):
        """Test to reproduce issue #575."""
        # use tag name "1000"
        cli_string = (
            f"lightly-download token='123' dataset_id='{_DATASET_ID}' tag_name=1000"
        )
        self.parse_cli_string(cli_string)
        # pytest.warns(None) is deprecated (and removed in pytest 8); record
        # warnings with the stdlib instead and assert that none were emitted.
        with warnings.catch_warnings(record=True) as record:
            warnings.simplefilter("always")
            lightly.cli.download_cli(self.cfg)
        # check if the warning "Tag with name 1000 does not exist" is raised
        # if so, the cli string was not parsed correctly
        # (i.e. as int instead of str)
        self.assertEqual(len(record), 0)

    def tearDown(self) -> None:
        """Remove the filename list the download CLI writes for the tag."""
        try:
            os.remove(f"{self.cfg['tag_name']}.txt")
        except FileNotFoundError:
            pass
| 4,725 | 35.635659 | 131 | py |
lightly | lightly-master/tests/cli/test_cli_embed.py | import os
import re
import sys
import tempfile
import torchvision
from hydra.experimental import compose, initialize
import lightly
from tests.api_workflow.mocked_api_workflow_client import (
MockedApiWorkflowClient,
MockedApiWorkflowSetup,
)
class TestCLIEmbed(MockedApiWorkflowSetup):
    """Tests for the ``lightly-embed`` CLI entry point."""

    @classmethod
    def setUpClass(cls) -> None:
        # Route all API traffic of the embed CLI through the mocked client.
        sys.modules["lightly.cli.embed_cli"].ApiWorkflowClient = MockedApiWorkflowClient

    def setUp(self):
        """Create a fake dataset and compose the config with 0 training epochs."""
        MockedApiWorkflowSetup.setUp(self)
        self.create_fake_dataset()
        with initialize(config_path="../../lightly/cli/config", job_name="test_app"):
            self.cfg = compose(
                config_name="config",
                overrides=[
                    "token='123'",
                    f"input_dir={self.folder_path}",
                    "trainer.max_epochs=0",
                ],
            )

    def create_fake_dataset(self):
        """Write 16 fake JPG images into a temporary input folder."""
        n_data = 16
        self.dataset = torchvision.datasets.FakeData(
            size=n_data, image_size=(3, 32, 32)
        )
        self.folder_path = tempfile.mkdtemp()
        sample_names = [f"img_{i}.jpg" for i in range(n_data)]
        self.sample_names = sample_names
        for sample_idx in range(n_data):
            data = self.dataset[sample_idx]
            path = os.path.join(self.folder_path, sample_names[sample_idx])
            data[0].save(path)

    def test_embed(self):
        """Embedding exports its output path into the configured env variable."""
        lightly.cli.embed_cli(self.cfg)
        self.assertGreater(
            len(
                os.getenv(
                    self.cfg["environment_variable_names"][
                        "lightly_last_embedding_path"
                    ]
                )
            ),
            0,
        )

    def tearDown(self) -> None:
        """Remove embedding CSV files written into the working directory."""
        for filename in ["embeddings.csv", "embeddings_sorted.csv"]:
            try:
                os.remove(filename)
            except FileNotFoundError:
                pass
| 1,945 | 28.044776 | 88 | py |
lightly | lightly-master/tests/cli/test_cli_get_lighty_config.py | from lightly.cli.config.get_config import get_lightly_config
def test_get_lightly_config() -> None:
    """Spot-check a few default values of the lightly CLI configuration."""
    config = get_lightly_config()
    assert config.checkpoint == ""
    assert config.loader.batch_size == 16
    assert config.trainer.weights_summary is None
    assert config.summary_callback.max_depth == 1
| 336 | 29.636364 | 60 | py |
lightly | lightly-master/tests/cli/test_cli_magic.py | import os
import re
import sys
import tempfile
import torchvision
from hydra.experimental import compose, initialize
from lightly import cli
from tests.api_workflow.mocked_api_workflow_client import (
N_FILES_ON_SERVER,
MockedApiWorkflowClient,
MockedApiWorkflowSetup,
)
class TestCLIMagic(MockedApiWorkflowSetup):
    """Tests for the ``lightly-magic`` CLI (train + embed) against a mocked API."""

    def setUp(self):
        """Create a fake image folder and compose the hydra config for it."""
        MockedApiWorkflowSetup.setUp(self)
        self.create_fake_dataset()
        with initialize(config_path="../../lightly/cli/config", job_name="test_app"):
            self.cfg = compose(
                config_name="config",
                overrides=[
                    f"input_dir={self.folder_path}",
                    "trainer.max_epochs=0",
                ],
            )
    def create_fake_dataset(self, filename_appendix: str = ""):
        """Write as many fake images as the mocked server reports filenames.

        Args:
            filename_appendix: Optional suffix inserted into every filename.
        """
        n_data = len(self.api_workflow_client.get_filenames())
        self.dataset = torchvision.datasets.FakeData(
            size=n_data, image_size=(3, 32, 32)
        )
        self.folder_path = tempfile.mkdtemp()
        sample_names = [f"img_{i}{filename_appendix}.jpg" for i in range(n_data)]
        self.sample_names = sample_names
        for sample_idx in range(n_data):
            data = self.dataset[sample_idx]
            path = os.path.join(self.folder_path, sample_names[sample_idx])
            data[0].save(path)
    def parse_cli_string(self, cli_words: str):
        """Parse a ``lightly-magic key=value ...`` string into ``self.cfg``.

        Supports plain keys and one level of nesting (``a.b=value``); integer
        values are converted, everything else stays a string.
        """
        cli_words = cli_words.replace("lightly-magic ", "")
        # Alternating key/value tokens after splitting on '=' and spaces.
        cli_words = re.split("=| ", cli_words)
        assert len(cli_words) % 2 == 0
        dict_keys = cli_words[0::2]
        dict_values = cli_words[1::2]
        for key, value in zip(dict_keys, dict_values):
            value = value.strip('"')
            value = value.strip("'")
            try:
                value = int(value)
            except ValueError:
                pass  # keep non-integer values as strings
            key_subparts = key.split(".")
            if len(key_subparts) == 1:
                self.cfg[key] = value
            elif len(key_subparts) == 2:
                self.cfg[key_subparts[0]][key_subparts[1]] = value
            else:
                raise ValueError(
                    f"Keys with more than 2 subparts are not supported,"
                    f"but you entered {key}."
                )
    def test_parse_cli_string(self):
        """The helper itself must write parsed values into the config."""
        cli_string = "lightly-magic trainer.max_epochs=3"
        self.parse_cli_string(cli_string)
        self.assertEqual(self.cfg["trainer"]["max_epochs"], 3)
    def test_magic_with_trainer(self):
        """Run the full magic CLI with one training epoch."""
        MockedApiWorkflowClient.n_dims_embeddings_on_server = 32
        cli_string = "lightly-magic trainer.max_epochs=1"
        self.parse_cli_string(cli_string)
        cli.lightly_cli(self.cfg)
    def tearDown(self) -> None:
        # Remove CSV artifacts the CLI may have written into the cwd.
        for filename in ["embeddings.csv", "embeddings_sorted.csv"]:
            try:
                os.remove(filename)
            except FileNotFoundError:
                pass
| 2,919 | 32.953488 | 85 | py |
lightly | lightly-master/tests/cli/test_cli_train.py | import os
import re
import sys
import tempfile
import torchvision
from hydra.experimental import compose, initialize
from lightly import cli
from tests.api_workflow.mocked_api_workflow_client import (
MockedApiWorkflowClient,
MockedApiWorkflowSetup,
)
class TestCLITrain(MockedApiWorkflowSetup):
    """Tests for the ``lightly-train`` CLI entry point against a mocked API."""

    def setUp(self):
        """Create a fake image folder and compose the hydra config for it."""
        MockedApiWorkflowSetup.setUp(self)
        self.create_fake_dataset()
        with initialize(config_path="../../lightly/cli/config", job_name="test_app"):
            self.cfg = compose(
                config_name="config",
                overrides=[
                    f"input_dir={self.folder_path}",
                    "trainer.max_epochs=1",
                ],
            )
    def create_fake_dataset(self):
        """Write 5 random 32x32 RGB fake images into a fresh temp folder."""
        n_data = 5
        self.dataset = torchvision.datasets.FakeData(
            size=n_data, image_size=(3, 32, 32)
        )
        self.folder_path = tempfile.mkdtemp()
        sample_names = [f"img_{i}.jpg" for i in range(n_data)]
        self.sample_names = sample_names
        for sample_idx in range(n_data):
            data = self.dataset[sample_idx]
            path = os.path.join(self.folder_path, sample_names[sample_idx])
            data[0].save(path)
    def test_checkpoint_created(self):
        """Training must export a valid checkpoint path via the env variable."""
        cli.train_cli(self.cfg)
        checkpoint_path = os.getenv(
            self.cfg["environment_variable_names"]["lightly_last_checkpoint_path"]
        )
        assert checkpoint_path.endswith(".ckpt")
        assert os.path.isfile(checkpoint_path)
    def tearDown(self) -> None:
        # Remove CSV artifacts the CLI may have written into the cwd.
        for filename in ["embeddings.csv", "embeddings_sorted.csv"]:
            try:
                os.remove(filename)
            except FileNotFoundError:
                pass
| 1,743 | 29.596491 | 85 | py |
lightly | lightly-master/tests/cli/test_cli_version.py | import os
import re
import sys
import tempfile
import pytest
from hydra.experimental import compose, initialize
from lightly.cli.version_cli import version_cli
from tests.api_workflow.mocked_api_workflow_client import (
MockedApiWorkflowClient,
MockedApiWorkflowSetup,
)
class TestCLIVersion(MockedApiWorkflowSetup):
    """Tests for the ``lightly-version`` CLI entry point."""

    def setUp(self):
        MockedApiWorkflowSetup.setUp(self)
        with initialize(config_path="../../lightly/cli/config", job_name="test_app"):
            self.cfg = compose(config_name="config")
    @pytest.fixture(autouse=True)
    def capsys(self, capsys):
        # Expose pytest's stdout/stderr capture to the unittest-style methods.
        self.capsys = capsys
    # NOTE(review): the name looks copy-pasted from the train tests; the test
    # actually verifies the version string printed to stdout.
    def test_checkpoint_created(self):
        version_cli(self.cfg)
        out, err = self.capsys.readouterr()
        assert out.startswith("lightly version")
| 789 | 25.333333 | 85 | py |
lightly | lightly-master/tests/core/test_Core.py | import os
import re
import shutil
import tempfile
import unittest
import numpy as np
import pytest
import torchvision
from lightly.core import train_model_and_embed_images
class TestCore(unittest.TestCase):
    """End-to-end test for ``train_model_and_embed_images`` on a tiny fake dataset."""

    def ensure_dir(self, path_to_folder: str):
        """Create ``path_to_folder`` (including parents) if it does not exist."""
        # exist_ok=True avoids the racy exists-then-create pattern.
        os.makedirs(path_to_folder, exist_ok=True)

    def create_dataset(self, n_subfolders=5, n_samples_per_subfolder=20):
        """Write a fake image dataset with ``n_subfolders`` class folders.

        Returns:
            Tuple of (dataset root, folder names, per-folder sample names).
        """
        n_tot = n_subfolders * n_samples_per_subfolder
        dataset = torchvision.datasets.FakeData(size=n_tot, image_size=(3, 32, 32))
        tmp_dir = tempfile.mkdtemp()
        folder_names = [f"folder_{i}" for i in range(n_subfolders)]
        sample_names = [f"img_{i}.jpg" for i in range(n_samples_per_subfolder)]
        for folder_idx in range(n_subfolders):
            for sample_idx in range(n_samples_per_subfolder):
                # Fix: stride by samples-per-subfolder so every file gets a
                # distinct image. The previous `folder_idx * n_subfolders`
                # only produced unique indices when both arguments happened
                # to be equal.
                idx = folder_idx * n_samples_per_subfolder + sample_idx
                data = dataset[idx]
                self.ensure_dir(os.path.join(tmp_dir, folder_names[folder_idx]))
                data[0].save(
                    os.path.join(
                        tmp_dir, folder_names[folder_idx], sample_names[sample_idx]
                    )
                )
        self.dataset_dir = tmp_dir
        return tmp_dir, folder_names, sample_names

    # @pytest.mark.slow
    def test_train_and_embed(self):
        """Train for one epoch and check the types and sizes of the outputs."""
        n_subfolders = 3
        n_samples_per_subfolder = 3
        n_samples = n_subfolders * n_samples_per_subfolder
        # embed, no overwrites
        dataset_dir, _, _ = self.create_dataset(n_subfolders, n_samples_per_subfolder)
        # train, one overwrite
        embeddings, labels, filenames = train_model_and_embed_images(
            input_dir=dataset_dir,
            trainer={"max_epochs": 1},
            loader={"num_workers": 0},
        )
        self.assertEqual(len(embeddings), n_samples)
        self.assertEqual(len(labels), n_samples)
        self.assertEqual(len(filenames), n_samples)
        self.assertIsInstance(embeddings[0], np.ndarray)
        self.assertIsInstance(int(labels[0]), int)  # see if casting to int works
        self.assertIsInstance(filenames[0], str)

    def tearDown(self) -> None:
        """Remove the fake dataset and any checkpoints written into the cwd."""
        shutil.rmtree(self.dataset_dir)
        for root, dirs, files in os.walk(os.getcwd()):
            for file in files:
                # Fix: only delete real `.ckpt` files. The old regex
                # "(.*)?.ckpt$" left the dot unescaped and therefore also
                # matched files merely ending in "ckpt" (e.g. "xckpt").
                if file.endswith(".ckpt"):
                    os.remove(os.path.join(root, file))
| 2,462 | 33.690141 | 86 | py |
lightly | lightly-master/tests/data/test_LightlyDataset.py | import os
import random
import re
import shutil
import tempfile
import unittest
import warnings
from typing import List, Tuple
import numpy as np
import torch
import torchvision
from PIL.Image import Image
from lightly.data import LightlyDataset
from lightly.data._utils import check_images
from lightly.utils.io import INVALID_FILENAME_CHARACTERS
try:
import av
import cv2
from lightly.data._video import VideoDataset
VIDEO_DATASET_AVAILABLE = True
except ModuleNotFoundError:
VIDEO_DATASET_AVAILABLE = False
class TestLightlyDataset(unittest.TestCase):
    """Tests for ``LightlyDataset`` over image folders and video folders."""

    def ensure_dir(self, path_to_folder: str):
        """Create ``path_to_folder`` (including parents) if it does not exist."""
        os.makedirs(path_to_folder, exist_ok=True)

    def create_dataset_no_subdir(self, n_samples: int) -> Tuple[str, List[str]]:
        """Write ``n_samples`` fake images into a flat temp folder."""
        dataset = torchvision.datasets.FakeData(size=n_samples, image_size=(3, 32, 32))
        tmp_dir = tempfile.mkdtemp()
        sample_names = [f"img_{i}.jpg" for i in range(n_samples)]
        for sample_idx in range(n_samples):
            data = dataset[sample_idx]
            path = os.path.join(tmp_dir, sample_names[sample_idx])
            data[0].save(path)
        return tmp_dir, sample_names

    def create_dataset(self, n_subfolders=5, n_samples_per_subfolder=20):
        """Write a fake image dataset with ``n_subfolders`` class folders."""
        n_tot = n_subfolders * n_samples_per_subfolder
        dataset = torchvision.datasets.FakeData(size=n_tot, image_size=(3, 32, 32))
        tmp_dir = tempfile.mkdtemp()
        folder_names = [f"folder_{i}" for i in range(n_subfolders)]
        sample_names = [f"img_{i}.jpg" for i in range(n_samples_per_subfolder)]
        for folder_idx in range(n_subfolders):
            for sample_idx in range(n_samples_per_subfolder):
                # Fix: stride by samples-per-subfolder so all files receive
                # distinct images. The previous `folder_idx * n_subfolders`
                # produced overlapping indices (e.g. 5x20 reuses images).
                idx = folder_idx * n_samples_per_subfolder + sample_idx
                data = dataset[idx]
                self.ensure_dir(os.path.join(tmp_dir, folder_names[folder_idx]))
                data[0].save(
                    os.path.join(
                        tmp_dir, folder_names[folder_idx], sample_names[sample_idx]
                    )
                )
        return tmp_dir, folder_names, sample_names

    def create_video_dataset(self, n_videos=5, n_frames_per_video=10, w=32, h=32, c=3):
        """Write ``n_videos`` identical fake .avi videos into a temp folder."""
        self.n_videos = n_videos
        self.n_frames_per_video = n_frames_per_video
        self.input_dir = tempfile.mkdtemp()
        self.ensure_dir(self.input_dir)
        self.frames = (np.random.randn(n_frames_per_video, w, h, c) * 255).astype(
            np.uint8
        )
        self.extensions = (".avi",)
        self.filenames = []
        # Fix: honor the `n_videos` argument (the count was hard-coded to 5).
        for i in range(n_videos):
            filename = f"output-{i}.avi"
            self.filenames.append(filename)
            path = os.path.join(self.input_dir, filename)
            out = cv2.VideoWriter(path, 0, 1, (w, h))
            for frame in self.frames:
                out.write(frame)
            out.release()

    def test_create_lightly_dataset_from_folder(self):
        """Filenames, length and dump of a folder dataset with subfolders."""
        n_subfolders = 5
        n_samples_per_subfolder = 10
        n_tot_files = n_subfolders * n_samples_per_subfolder
        dataset_dir, folder_names, sample_names = self.create_dataset(
            n_subfolders, n_samples_per_subfolder
        )
        dataset = LightlyDataset(input_dir=dataset_dir)
        filenames = dataset.get_filenames()
        fnames = []
        for dir_name in folder_names:
            for fname in sample_names:
                fnames.append(os.path.join(dir_name, fname))
        self.assertEqual(len(filenames), n_tot_files)
        self.assertEqual(len(dataset), n_tot_files)
        self.assertListEqual(sorted(fnames), sorted(filenames))
        out_dir = tempfile.mkdtemp()
        dataset.dump(out_dir)
        # Dumping must write exactly one file per sample, per subfolder.
        self.assertEqual(
            sum(
                len(os.listdir(os.path.join(out_dir, subdir)))
                for subdir in os.listdir(out_dir)
            ),
            len(dataset),
        )
        shutil.rmtree(dataset_dir)
        shutil.rmtree(out_dir)

    def test_create_lightly_dataset_from_folder_nosubdir(self):
        """Filenames and iteration of a flat folder dataset."""
        # create a dataset
        n_tot = 100
        tmp_dir, sample_names = self.create_dataset_no_subdir(n_tot)
        # create lightly dataset
        dataset = LightlyDataset(input_dir=tmp_dir)
        filenames = dataset.get_filenames()
        # tests
        self.assertEqual(len(filenames), n_tot)
        self.assertEqual(len(dataset), n_tot)
        self.assertListEqual(sorted(sample_names), sorted(filenames))
        for i in range(n_tot):
            sample, target, fname = dataset[i]

    def test_create_lightly_dataset_with_invalid_char_in_filename(self):
        """Dataset creation must fail for filenames with invalid characters."""
        # create a dataset
        n_tot = 100
        dataset = torchvision.datasets.FakeData(size=n_tot, image_size=(3, 32, 32))
        for invalid_char in INVALID_FILENAME_CHARACTERS:
            with self.subTest(msg=f"invalid_char: {invalid_char}"):
                tmp_dir = tempfile.mkdtemp()
                # NOTE(review): the filenames always contain ',' regardless of
                # `invalid_char`; presumably they should embed `invalid_char`
                # itself — confirm against INVALID_FILENAME_CHARACTERS before
                # changing, since some characters may be invalid on the
                # filesystem as well.
                sample_names = [f"img_,_{i}.jpg" for i in range(n_tot)]
                for sample_idx in range(n_tot):
                    data = dataset[sample_idx]
                    path = os.path.join(tmp_dir, sample_names[sample_idx])
                    data[0].save(path)
                # create lightly dataset
                with self.assertRaises(ValueError):
                    dataset = LightlyDataset(input_dir=tmp_dir)

    def test_check_images(self):
        """``check_images`` must separate healthy from corrupt image files."""
        # create a dataset
        tmp_dir = tempfile.mkdtemp()
        n_healthy = 100
        n_corrupt = 20
        dataset = torchvision.datasets.FakeData(size=n_healthy, image_size=(3, 32, 32))
        sample_names = [f"img_{i}.jpg" for i in range(n_healthy)]
        for sample_name, data in zip(sample_names, dataset):
            path = os.path.join(tmp_dir, sample_name)
            data[0].save(path)
        corrupt_sample_names = [
            f"img_{i}.jpg" for i in range(n_healthy, n_healthy + n_corrupt)
        ]
        for sample_name in corrupt_sample_names:
            path = os.path.join(tmp_dir, sample_name)
            with open(path, "a") as f:
                f.write("this_is_not_an_image")
        # tests
        healthy_images, corrupt_images = check_images(tmp_dir)
        assert len(healthy_images) == n_healthy
        assert len(corrupt_images) == n_corrupt

    def test_not_existing_folder_dataset(self):
        """A non-existing input directory must raise a ValueError."""
        with self.assertRaises(ValueError):
            LightlyDataset("/a-random-hopefully-non/existing-path-to-nowhere/")

    def test_from_torch_dataset(self):
        """Wrapping a torch dataset must preserve its length and filenames."""
        _dataset = torchvision.datasets.FakeData(size=1, image_size=(3, 32, 32))
        dataset = LightlyDataset.from_torch_dataset(_dataset)
        self.assertEqual(len(_dataset), len(dataset))
        self.assertEqual(len(dataset.get_filenames()), len(dataset))

    def test_from_torch_dataset_with_transform(self):
        """A transform passed to from_torch_dataset must be propagated."""
        dataset_ = torchvision.datasets.FakeData(size=1, image_size=(3, 32, 32))
        dataset = LightlyDataset.from_torch_dataset(
            dataset_, transform=torchvision.transforms.ToTensor()
        )
        self.assertIsNotNone(dataset.transform)
        self.assertIsNotNone(dataset.dataset.transform)

    def test_filenames_dataset_no_samples(self):
        """An empty filenames whitelist must raise an error."""
        tmp_dir, folder_names, sample_names = self.create_dataset()
        with self.assertRaises((RuntimeError, FileNotFoundError)):
            dataset = LightlyDataset(input_dir=tmp_dir, filenames=[])

    @unittest.skip("https://github.com/lightly-ai/lightly/issues/535")
    def test_filenames_dataset_with_subdir(self):
        """Filenames whitelist on a dataset with subfolders (currently skipped)."""
        tmp_dir, folder_names, sample_names = self.create_dataset()
        folder_name_to_target = {
            folder_name: i for i, folder_name in enumerate(folder_names)
        }
        all_filenames = [
            os.path.join(folder_name, sample_name)
            for folder_name in folder_names
            for sample_name in sample_names
        ]
        n_samples = int(len(all_filenames) / 2)
        for i in range(5):
            np.random.seed(i)
            filenames = np.random.choice(all_filenames, n_samples, replace=False)
            dataset = LightlyDataset(input_dir=tmp_dir, filenames=filenames)
            filenames_dataset = dataset.get_filenames()
            self.assertEqual(len(filenames_dataset), len(dataset))
            self.assertEqual(len(filenames_dataset), len(filenames))
            self.assertEqual(set(filenames_dataset), set(filenames))
            filenames_dataset = set(filenames_dataset)
            for image, target, filename in dataset:
                self.assertIsInstance(image, Image)
                folder_name = filename.split(sep=os.sep)[0]
                self.assertEqual(target, folder_name_to_target[folder_name])
                self.assertIsInstance(filename, str)
                assert filename in filenames_dataset

    def test_filenames_dataset_no_subdir(self):
        """Filenames whitelist on a flat dataset restricts iteration."""
        # create a dataset
        n_tot = 100
        dataset = torchvision.datasets.FakeData(size=n_tot, image_size=(3, 32, 32))
        tmp_dir = tempfile.mkdtemp()
        all_filenames = [f"img_{i}.jpg" for i in range(n_tot)]
        for sample_idx in range(n_tot):
            data = dataset[sample_idx]
            path = os.path.join(tmp_dir, all_filenames[sample_idx])
            data[0].save(path)
        n_samples = len(all_filenames) // 2
        for i in range(5):
            np.random.seed(i)
            filenames = np.random.choice(all_filenames, n_samples, replace=False)
            dataset = LightlyDataset(input_dir=tmp_dir, filenames=filenames)
            filenames_dataset = dataset.get_filenames()
            self.assertEqual(len(filenames_dataset), len(dataset))
            self.assertEqual(len(filenames_dataset), len(filenames))
            self.assertEqual(set(filenames_dataset), set(filenames))
            filenames_dataset = set(filenames_dataset)
            for image, target, filename in dataset:
                self.assertIsInstance(image, Image)
                self.assertEqual(target, 0)
                self.assertIsInstance(filename, str)
                self.assertIn(filename, filenames_dataset)

    # NOTE(review): the skip reasons below describe the condition rather than
    # why the test is skipped; the displayed message is misleading when
    # skipping actually happens.
    @unittest.skipUnless(VIDEO_DATASET_AVAILABLE, "PyAV and CV2 are both installed")
    def test_video_dataset_available(self):
        """Dumping a subset of frames from a video dataset."""
        self.create_video_dataset()
        dataset = LightlyDataset(input_dir=self.input_dir)
        out_dir = tempfile.mkdtemp()
        dataset.dump(out_dir, dataset.get_filenames()[(len(dataset) // 2) :])
        self.assertEqual(len(os.listdir(out_dir)), len(dataset) // 2)
        for filename in os.listdir(out_dir):
            self.assertIn(filename, dataset.get_filenames()[(len(dataset) // 2) :])

    @unittest.skipIf(VIDEO_DATASET_AVAILABLE, "PyAV or CV2 is/are not installed")
    def test_video_dataset_unavailable(self):
        """Without video backends, a video folder must raise an ImportError."""
        tmp_dir = tempfile.mkdtemp()
        # simulate a video
        # the video dataset will check to see whether there exists a file
        # with a video extension, it's enough to fake a video file here
        path = os.path.join(tmp_dir, "my_file.png")
        dataset = torchvision.datasets.FakeData(size=1, image_size=(3, 32, 32))
        image, _ = dataset[0]
        image.save(path)
        os.rename(path, os.path.join(tmp_dir, "my_file.avi"))
        with self.assertRaises(ImportError):
            dataset = LightlyDataset(input_dir=tmp_dir)
        shutil.rmtree(tmp_dir)
        return

    @unittest.skipUnless(VIDEO_DATASET_AVAILABLE, "PyAV or CV2 are not available")
    def test_video_dataset_filenames(self):
        """Filenames whitelist on a video dataset restricts iteration."""
        self.create_video_dataset()
        all_filenames = self.filenames

        def filename_img_fits_video(filename_img: str):
            # A frame filename must contain the stem of one of the videos.
            for filename_video in all_filenames:
                filename_video = filename_video[: -1 * len(".avi")]
                if filename_video in filename_img:
                    return True
            return False

        n_samples = int(len(all_filenames) / 2)
        np.random.seed(42)
        filenames = np.random.choice(all_filenames, n_samples, replace=False)
        dataset = LightlyDataset(input_dir=self.input_dir, filenames=filenames)
        filenames_dataset = dataset.get_filenames()
        for image, target, filename in dataset:
            self.assertIsInstance(image, Image)
            self.assertTrue(filename_img_fits_video(filename))
            self.assertIsInstance(filename, str)
            self.assertIn(filename, filenames_dataset)

    def test_transform_setter(self, dataset: LightlyDataset = None):
        """Setting ``transform`` must propagate to the wrapped dataset."""
        if dataset is None:
            tmp_dir, _, _ = self.create_dataset()
            dataset = LightlyDataset(input_dir=tmp_dir)
        # the transform of both datasets should be None
        self.assertIsNone(dataset.transform)
        self.assertIsNone(dataset.dataset.transform)
        # use the setter
        dataset.transform = torchvision.transforms.ToTensor()
        # assert that the transform is set in the nested dataset
        self.assertIsNotNone(dataset.transform)
        self.assertIsNotNone(dataset.dataset.transform)

    def test_no_dir_no_transform_fails(self):
        """A transform without an input directory must raise a ValueError."""
        with self.assertRaises(ValueError):
            LightlyDataset(None, transform=torchvision.transforms.ToTensor())

    @unittest.skipUnless(VIDEO_DATASET_AVAILABLE, "PyAV or CV2 is/are not installed")
    def test_dataset_get_filenames(self):
        """Optimized and per-index filename retrieval must agree."""
        self.create_video_dataset()
        dataset = LightlyDataset(input_dir=self.input_dir)
        video_dataset = dataset.dataset
        # Get filenames using VideoDataset.get_filenames.
        video_dataset_filenames = video_dataset.get_filenames()
        # Get filenames using calls to VideoDataset.get_filename(index).
        # This removes the optimization introduced in VideoDatset.get_filenames.
        # Both methods should give the same result.
        get_filenames = VideoDataset.get_filenames
        del VideoDataset.get_filenames
        lightly_dataset_filenames = dataset.get_filenames()
        VideoDataset.get_filenames = get_filenames
        assert video_dataset_filenames == lightly_dataset_filenames

    def test_dataset_with_subdirs(self):
        """Permission errors must surface for unreadable files/folders."""
        tmp_dir, _, _ = self.create_dataset()
        with self.subTest("no read rights files"):
            for subdir, dirs, files in os.walk(tmp_dir):
                for filename in files:
                    filepath = os.path.join(subdir, filename)
                    os.chmod(filepath, 0o000)
            dataset = LightlyDataset(input_dir=tmp_dir)
            self.assertGreater(len(dataset.get_filenames()), 0)
            with self.assertRaises(PermissionError):
                for _ in dataset:
                    pass
        with self.subTest("no read rights subfolders"):
            for subdir, dirs, files in os.walk(tmp_dir):
                os.chmod(subdir, 0o000)
            with self.assertRaises(PermissionError):
                dataset = LightlyDataset(input_dir=tmp_dir)
        with self.subTest("no read rights root"):
            os.chmod(tmp_dir, 0o000)
            with self.assertRaises(PermissionError):
                dataset = LightlyDataset(input_dir=tmp_dir)

    def test_dataset_plain(self):
        """Permission errors must surface for a flat (no-subdir) dataset."""
        tmp_dir, _ = self.create_dataset_no_subdir(100)
        with self.subTest("no read rights files"):
            for subdir, dirs, files in os.walk(tmp_dir):
                for filename in files:
                    filepath = os.path.join(tmp_dir, filename)
                    os.chmod(filepath, 0o000)
            dataset = LightlyDataset(input_dir=tmp_dir)
            self.assertGreater(len(dataset.get_filenames()), 0)
            with self.assertRaises(PermissionError):
                for _ in dataset:
                    pass
        with self.subTest("no read rights root"):
            os.chmod(tmp_dir, 0o000)
            with self.assertRaises(PermissionError):
                dataset = LightlyDataset(input_dir=tmp_dir)
| 15,873 | 38.984887 | 87 | py |
lightly | lightly-master/tests/data/test_LightlySubset.py | import random
import tempfile
import unittest
from typing import List, Tuple
from lightly.data.dataset import LightlyDataset
from lightly.data.lightly_subset import LightlySubset
from tests.data.test_LightlyDataset import TestLightlyDataset
try:
import av
import cv2
from lightly.data._video import VideoDataset
VIDEO_DATASET_AVAILABLE = True
except ModuleNotFoundError:
VIDEO_DATASET_AVAILABLE = False
class TestLightlySubset(TestLightlyDataset):
    """Tests for ``LightlySubset`` on image and video datasets."""

    def setUp(self) -> None:
        # Reuse the dataset helpers from TestLightlyDataset.
        tmp_dir, folder_names, sample_names = self.create_dataset(
            n_subfolders=5, n_samples_per_subfolder=5
        )
        self.input_dir = tmp_dir
    def create_subset(self, seed=0) -> Tuple[LightlySubset, List[str]]:
        """Create a subset containing a random half of the base dataset."""
        random.seed(seed)
        base_dataset = LightlyDataset(input_dir=self.input_dir)
        filenames_base_dataset = base_dataset.get_filenames()
        no_samples_subset = int(len(filenames_base_dataset) * 0.5)
        filenames_subset = random.sample(filenames_base_dataset, no_samples_subset)
        subset = LightlySubset(
            base_dataset=base_dataset, filenames_subset=filenames_subset
        )
        return subset, filenames_subset
    # NOTE(review): the skip reason describes the condition, not the reason
    # for skipping; the displayed message is misleading when skipped.
    @unittest.skipUnless(VIDEO_DATASET_AVAILABLE, "PyAV and CV2 are both installed")
    def create_video_subset(self, seed=0) -> Tuple[LightlySubset, List[str]]:
        """Create a subset containing a random half of a fake video dataset."""
        random.seed(seed)
        self.create_video_dataset(n_videos=5, n_frames_per_video=10)
        base_dataset = LightlyDataset(self.input_dir)
        filenames_base_dataset = base_dataset.get_filenames()
        no_samples_subset = int(len(filenames_base_dataset) * 0.5)
        filenames_subset = random.sample(filenames_base_dataset, no_samples_subset)
        subset = LightlySubset(
            base_dataset=base_dataset, filenames_subset=filenames_subset
        )
        return subset, filenames_subset
    def test_create_lightly_subset(self):
        """Subset must preserve the whitelist order of filenames."""
        subset, filenames_subset = self.create_subset()
        assert subset.get_filenames() == filenames_subset
        for index_subset, filename_subset in enumerate(filenames_subset):
            sample, target, fname = subset.__getitem__(index_subset)
            assert filename_subset == fname
    @unittest.skipUnless(VIDEO_DATASET_AVAILABLE, "PyAV and CV2 are both installed")
    def test_create_lightly_video_subset(self):
        """Video subset must preserve the whitelist order of filenames."""
        subset, filenames_subset = self.create_video_subset()
        assert subset.get_filenames() == filenames_subset
        for index_subset, filename_subset in enumerate(filenames_subset):
            sample, target, fname = subset.__getitem__(index_subset)
            assert filename_subset == fname
    def test_lightly_subset_transform(self):
        """The transform setter must also work on a subset."""
        subset, filenames_subset = self.create_subset()
        self.test_transform_setter(dataset=subset)
    def test_lightly_subset_dump(self):
        """Dumping a subset must write exactly the subset's files."""
        subset, filenames_subset = self.create_subset()
        dataset = subset
        out_dir = tempfile.mkdtemp()
        dataset.dump(out_dir)
        files_output_dir = LightlyDataset(input_dir=out_dir).get_filenames()
        assert set(files_output_dir) == set(dataset.get_filenames())
| 3,160 | 35.755814 | 84 | py |
lightly | lightly-master/tests/data/test_VideoDataset.py | import contextlib
import io
import os
import shutil
import tempfile
import unittest
from fractions import Fraction
from typing import List
from unittest import mock
import cv2
import numpy as np
import PIL
import torch
import torchvision
from lightly.data import LightlyDataset, NonIncreasingTimestampError
from lightly.data._video import (
VideoDataset,
_find_non_increasing_timestamps,
_make_dataset,
)
try:
import av
PYAV_AVAILABLE = True
except ModuleNotFoundError:
PYAV_AVAILABLE = False
VIDEO_READER_AVAILABLE = torchvision.io._HAS_VIDEO_OPT
VIDEO_BACKENDS = ["pyav", "video_reader"]
DEFAULT_BACKEND = "pyav"
@unittest.skipUnless(
    PYAV_AVAILABLE or VIDEO_READER_AVAILABLE, "No video backend available"
)
class TestVideoDataset(unittest.TestCase):
    """Tests for ``VideoDataset`` across the pyav and video_reader backends."""

    def tearDown(self):
        # Make sure to set the default backend to not interfere with other tests.
        torchvision.set_video_backend(DEFAULT_BACKEND)
    def ensure_dir(self, path_to_folder: str):
        """Create ``path_to_folder`` if it does not exist yet."""
        if not os.path.exists(path_to_folder):
            os.makedirs(path_to_folder)
    def create_dataset_specified_frames_per_video(
        self, frames_per_video: List[int], w=32, h=32, c=3
    ):
        """Write one fake .avi per entry of ``frames_per_video``.

        The frame count is encoded zero-padded in the filename so tests can
        recover it from the path.
        """
        self.input_dir = tempfile.mkdtemp()
        self.ensure_dir(self.input_dir)
        self.frames_over_videos = [
            (np.random.randn(frames, w, h, c) * 255).astype(np.uint8)
            for frames in frames_per_video
        ]
        self.extensions = ".avi"
        for frames in self.frames_over_videos:
            path = os.path.join(self.input_dir, f"output-{len(frames):03}.avi")
            print(path)
            out = cv2.VideoWriter(path, 0, 1, (w, h))
            for frame in frames:
                out.write(frame)
            out.release()
    def create_dataset(self, n_videos=5, n_frames_per_video=10, w=32, h=32, c=3):
        """Write ``n_videos`` fake .avi videos sharing the same frames."""
        self.n_videos = n_videos
        self.n_frames_per_video = n_frames_per_video
        self.input_dir = tempfile.mkdtemp()
        self.ensure_dir(self.input_dir)
        self.frames = (np.random.randn(n_frames_per_video, w, h, c) * 255).astype(
            np.uint8
        )
        self.extensions = ".avi"
        for i in range(n_videos):
            path = os.path.join(self.input_dir, f"output-{i}.avi")
            print(path)
            out = cv2.VideoWriter(path, 0, 1, (w, h))
            for frame in self.frames:
                out.write(frame)
            out.release()
    @unittest.skipUnless(
        PYAV_AVAILABLE and VIDEO_READER_AVAILABLE,
        "pyav and video_reader backends must be both available",
    )
    def test_video_similar_timestamps_for_different_backends(self):
        """Both backends must report identical timestamps and offsets."""
        frames_per_video = list(range(1, 10))
        self.create_dataset_specified_frames_per_video(frames_per_video)
        timestamps = []
        offsets = []
        backends = []
        instances = []
        # iterate through different backends
        for backend in VIDEO_BACKENDS:
            torchvision.set_video_backend(backend)
            video_instances, video_timestamps, video_offsets, _ = _make_dataset(
                self.input_dir, extensions=self.extensions
            )
            timestamps.append(video_timestamps)
            offsets.append(video_offsets)
            backends.append(backend)
            instances.append(video_instances)
        # make sure backends don't match (sanity check)
        self.assertNotEqual(backends[0], backends[1])
        # we expect the same timestamps and offsets
        self.assertEqual(timestamps[0], timestamps[1])
        self.assertEqual(offsets[0], offsets[1])
        # frame count is zero-padded into positions [-7:-4] of the filename
        expected_frame_counts = [int(filename[-7:-4]) for filename in instances[0]]
        # calculate expected offsets with old (slow) implementation
        expected_offsets = [0] + expected_frame_counts[:-1]
        for i in range(1, len(expected_offsets)):
            expected_offsets[i] = expected_offsets[i - 1] + expected_offsets[i]
        self.assertEqual(expected_offsets, offsets[0])
        shutil.rmtree(self.input_dir)
    def test_video_dataset_tqdm_args(self):
        """Custom tqdm arguments must show up in the progress bar output."""
        self.create_dataset()
        desc = "test_video_dataset_tqdm_args description asdf"
        f = io.StringIO()
        with contextlib.redirect_stderr(f):
            dataset = VideoDataset(
                self.input_dir,
                extensions=self.extensions,
                tqdm_args={
                    "desc": desc,
                },
            )
        shutil.rmtree(self.input_dir)
        printed = f.getvalue()
        self.assertTrue(desc in printed)
    def test_video_dataset_init_dataloader(self):
        """Parallel frame counting must match the single-process result."""
        self.create_dataset()
        dataset_4_workers = LightlyDataset(
            self.input_dir, num_workers_video_frame_counting=4
        )
        dataset_0_workers = LightlyDataset(
            self.input_dir, num_workers_video_frame_counting=0
        )
        self.assertListEqual(
            dataset_0_workers.get_filenames(), dataset_4_workers.get_filenames()
        )
        self.assertListEqual(
            dataset_0_workers.dataset.offsets, dataset_4_workers.dataset.offsets
        )
        for timestamps_0_workers, timestamps_4_workers in zip(
            dataset_0_workers.dataset.video_timestamps,
            dataset_4_workers.dataset.video_timestamps,
        ):
            self.assertListEqual(timestamps_0_workers, timestamps_4_workers)
        self.assertTupleEqual(
            dataset_0_workers.dataset.fps, dataset_4_workers.dataset.fps
        )
    @unittest.skipUnless(PYAV_AVAILABLE, "PyAV unavailable")
    def test_video_dataset_from_folder__pyav(self) -> None:
        torchvision.set_video_backend("pyav")
        self._test_video_dataset_from_folder()
    @unittest.skipUnless(VIDEO_READER_AVAILABLE, "video_reader unavailable")
    def test_video_dataset_from_folder__video_reader(self) -> None:
        torchvision.set_video_backend("video_reader")
        self._test_video_dataset_from_folder()
    def _test_video_dataset_from_folder(self):
        """Check __len__, __getitem__ and get_filename on a fresh dataset."""
        self.create_dataset()
        # create dataset
        dataset = VideoDataset(self.input_dir, extensions=self.extensions)
        # __len__
        self.assertEqual(len(dataset), self.n_frames_per_video * self.n_videos)
        # __getitem__
        for i in range(len(dataset)):
            frame, label = dataset[i]
            self.assertIsInstance(frame, PIL.Image.Image)
            self.assertEqual(label, i // self.n_frames_per_video)
        # get_filename
        for i in range(len(dataset)):
            frame, label = dataset[i]
            filename = dataset.get_filename(i)
            print(filename)
            self.assertTrue(
                filename.endswith(f"-{(i % self.n_frames_per_video):02d}-avi.png")
            )
        shutil.rmtree(self.input_dir)
    def test_video_dataset_no_read_rights(self):
        """Permission behavior for unreadable files, subdirs and root."""
        n_videos = 7
        self.create_dataset(n_videos=n_videos)
        with self.subTest("no read rights files"):
            for subdir, dirs, files in os.walk(self.input_dir):
                for filename in files:
                    filepath = os.path.join(self.input_dir, filename)
                    os.chmod(filepath, 0o000)
            # This will not raise any Permissions error, as they are caught by torchvision:
            # https://github.com/pytorch/vision/blob/5985504cc32011fbd4312600b4492d8ae0dd13b4/torchvision/io/video.py#L397
            dataset = LightlyDataset(self.input_dir)
            self.assertEqual(len(dataset), 0)
        with self.subTest("no read rights subdirs"):
            for subdir, dirs, files in os.walk(self.input_dir):
                os.chmod(subdir, 0o000)
            with self.assertRaises(PermissionError):
                dataset = LightlyDataset(self.input_dir)
        with self.subTest("no read rights root"):
            os.chmod(self.input_dir, 0o000)
            with self.assertRaises(PermissionError):
                dataset = LightlyDataset(self.input_dir)
    @unittest.skipUnless(PYAV_AVAILABLE, "PyAV unavailable")
    def test_video_dataset_non_increasing_timestamps__pyav(self):
        torchvision.set_video_backend("pyav")
        self._test_video_dataset_non_increasing_timestamps()
    @unittest.skipUnless(VIDEO_READER_AVAILABLE, "video_reader unavailable")
    def test_video_dataset_non_increasing_timestamps__video_reader(self):
        torchvision.set_video_backend("video_reader")
        self._test_video_dataset_non_increasing_timestamps()
    def _test_video_dataset_non_increasing_timestamps(self):
        """Broken timestamps must raise unless the exception is disabled."""
        self.create_dataset(n_videos=2, n_frames_per_video=5)
        # overwrite the _make_dataset function to return a wrong timestamp
        def _make_dataset_with_non_increasing_timestamps(*args, **kwargs):
            video_instances, timestamps, offsets, fpss = _make_dataset(*args, **kwargs)
            # set timestamp of 4th frame in 1st video to timestamp of 2nd frame.
            timestamps[0][3] = timestamps[0][1]
            return video_instances, timestamps, offsets, fpss
        with mock.patch(
            "lightly.data._video._make_dataset",
            _make_dataset_with_non_increasing_timestamps,
        ):
            # getting frame at wrong timestamp should throw an exception
            dataset = VideoDataset(self.input_dir, extensions=self.extensions)
            for i in range(len(dataset)):
                if i == 3:
                    # frame with wrong timestamp
                    with self.assertRaises(NonIncreasingTimestampError):
                        dataset[i]
                else:
                    dataset[i]
            # Getting frame at wrong timestamp should throw an exception
            # from dataloader but not break the dataloader itself. Future
            # calls to next() should still work.
            dataloader = torch.utils.data.DataLoader(
                dataset, num_workers=2, batch_size=None, collate_fn=lambda x: x
            )
            dataloader_iter = iter(dataloader)
            for i in range(len(dataset)):
                if i == 3:
                    # frame with wrong timestamp
                    with self.assertRaises(NonIncreasingTimestampError):
                        next(dataloader_iter)
                else:
                    next(dataloader_iter)
            # disable exception, should be able to load all frames
            dataset.exception_on_non_increasing_timestamp = False
            total_frames = 0
            for _ in dataset:
                total_frames += 1
            self.assertEqual(total_frames, len(dataset))
    @unittest.skipUnless(PYAV_AVAILABLE, "PyAV unavailable")
    def test_video_dataset_dataloader__pyav(self):
        torchvision.set_video_backend("pyav")
        self._test_video_dataset_dataloader()
    @unittest.skipUnless(VIDEO_READER_AVAILABLE, "video_reader unavailable")
    def test_video_dataset_dataloader__video_reader(self):
        torchvision.set_video_backend("video_reader")
        self._test_video_dataset_dataloader()
    def _test_video_dataset_dataloader(self):
        """Smoke test: the dataset must be iterable through a DataLoader."""
        self.create_dataset()
        dataset = VideoDataset(self.input_dir, extensions=self.extensions)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            num_workers=2,
            batch_size=3,
            shuffle=True,
            collate_fn=lambda x: x,
        )
        for batch in dataloader:
            pass
    def test_find_non_increasing_timestamps(self):
        """Unit test for the non-increasing timestamp detection helper."""
        # no timestamps
        non_increasing = _find_non_increasing_timestamps([])
        self.assertListEqual(non_increasing, [])
        # single timestamp
        timestamps = [Fraction(0, 1)]
        expected = [False]
        non_increasing = _find_non_increasing_timestamps(timestamps)
        self.assertListEqual(non_increasing, expected)
        # all timestamps increasing
        timestamps = [Fraction(0, 1), Fraction(1, 1), Fraction(2, 1)]
        expected = [False, False, False]
        non_increasing = _find_non_increasing_timestamps(timestamps)
        self.assertListEqual(non_increasing, expected)
        # all timestamps equal
        timestamps = [Fraction(0, 1), Fraction(0, 1), Fraction(0, 1)]
        expected = [False, True, True]
        non_increasing = _find_non_increasing_timestamps(timestamps)
        self.assertListEqual(non_increasing, expected)
        # some timestamps equal and some decreasing
        timestamps = [
            Fraction(-1, 1),
            Fraction(0, 1),
            Fraction(1, 1),
            Fraction(2, 3),
            Fraction(2, 3),
            Fraction(2, 1),
            Fraction(3, 2),
        ]
        expected = [False, False, False, True, True, False, True]
        non_increasing = _find_non_increasing_timestamps(timestamps)
        self.assertListEqual(non_increasing, expected)
| 12,849 | 36.573099 | 122 | py |
lightly | lightly-master/tests/data/test_data_collate.py | import random
import unittest
import torch
import torchvision
import torchvision.transforms as transforms
from lightly.data import (
BaseCollateFunction,
ImageCollateFunction,
MultiCropCollateFunction,
PIRLCollateFunction,
SimCLRCollateFunction,
SwaVCollateFunction,
collate,
)
from lightly.data.collate import (
DINOCollateFunction,
MAECollateFunction,
MSNCollateFunction,
MultiViewCollateFunction,
VICRegCollateFunction,
VICRegLCollateFunction,
)
from lightly.transforms import RandomRotate
class TestDataCollate(unittest.TestCase):
    """Tests for the collate functions in ``lightly.data.collate``."""

    def create_batch(self, batch_size=16, seed=0):
        """Returns a fake batch of ``(PIL image, label, filename)`` tuples.

        Fix: ``seed`` was previously ignored (the call was hard-coded to
        ``torch.manual_seed(0)``); it is now honored. Default behavior is
        unchanged.
        """
        torch.manual_seed(seed)
        rnd_images = torchvision.datasets.FakeData(size=batch_size)
        fnames = [f"img_{i}.jpg" for i in range(batch_size)]
        labels = [random.randint(0, 5) for i in range(batch_size)]
        batch = []
        for i in range(batch_size):
            batch.append((rnd_images[i][0], labels[i], fnames[i]))
        return batch

    def test_base_collate(self):
        # BaseCollateFunction returns two augmented views per sample.
        batch = self.create_batch()
        transform = transforms.ToTensor()
        collate = BaseCollateFunction(transform)
        samples, labels, fnames = collate(batch)
        samples0, samples1 = samples
        self.assertIsNotNone(collate)
        self.assertEqual(len(samples0), len(samples1))
        self.assertEqual(len(samples1), len(labels), len(fnames))

    def test_image_collate(self):
        batch = self.create_batch()
        img_collate = ImageCollateFunction()
        samples, labels, fnames = img_collate(batch)
        samples0, samples1 = samples
        self.assertIsNotNone(img_collate)
        self.assertEqual(len(samples0), len(samples1))
        self.assertEqual(len(samples1), len(labels), len(fnames))

    def test_image_collate_tuple_input_size(self):
        # input_size may be given as a (height, width) tuple.
        batch = self.create_batch()
        img_collate = ImageCollateFunction(
            input_size=(32, 32),
        )
        samples, labels, fnames = img_collate(batch)
        samples0, samples1 = samples
        self.assertIsNotNone(img_collate)
        self.assertEqual(len(samples0), len(samples1))
        self.assertEqual(len(samples1), len(labels), len(fnames))

    def test_image_collate_random_rotate(self):
        batch = self.create_batch()
        img_collate = ImageCollateFunction(rr_prob=1.0, rr_degrees=45.0)
        samples, labels, fnames = img_collate(batch)
        samples0, samples1 = samples
        self.assertIsNotNone(img_collate)
        self.assertEqual(len(samples0), len(samples1))
        self.assertEqual(len(samples1), len(labels), len(fnames))

    def test_image_collate_random_rotate__tuple_degrees(self):
        # rr_degrees may be given as a (min, max) range.
        batch = self.create_batch()
        img_collate = ImageCollateFunction(rr_prob=1.0, rr_degrees=(-15.0, 45.0))
        samples, labels, fnames = img_collate(batch)
        samples0, samples1 = samples
        self.assertIsNotNone(img_collate)
        self.assertEqual(len(samples0), len(samples1))
        self.assertEqual(len(samples1), len(labels), len(fnames))

    def test_simclr_collate_tuple_input_size(self):
        batch = self.create_batch()
        img_collate = SimCLRCollateFunction(
            input_size=(32, 32),
        )
        samples, labels, fnames = img_collate(batch)
        samples0, samples1 = samples
        self.assertIsNotNone(img_collate)
        self.assertEqual(len(samples0), len(samples1))
        self.assertEqual(len(samples1), len(labels), len(fnames))

    def test_multi_crop_collate(self):
        """Checks crop counts and sizes for various high/low resolution mixes."""
        batch = self.create_batch()
        for high in range(2, 4):
            for low in range(6):
                # Fix: the msg string was missing the ``f`` prefix, so the
                # {low}/{high} placeholders were never interpolated.
                with self.subTest(msg=f"n_low_res={low}, n_high_res={high}"):
                    multi_crop_collate = MultiCropCollateFunction(
                        crop_sizes=[32, 16],
                        crop_counts=[high, low],
                        crop_min_scales=[0.14, 0.04],
                        crop_max_scales=[1.0, 0.14],
                        transforms=torchvision.transforms.ToTensor(),
                    )
                    samples, labels, fnames = multi_crop_collate(batch)
                    self.assertIsNotNone(multi_crop_collate)
                    self.assertEqual(len(samples), low + high)
                    # The first `high` crops are 32x32, the rest 16x16.
                    for i, crop in enumerate(samples):
                        if i < high:
                            self.assertEqual(crop.shape[-1], 32)
                            self.assertEqual(crop.shape[-2], 32)
                        else:
                            self.assertEqual(crop.shape[-1], 16)
                            self.assertEqual(crop.shape[-2], 16)
                        self.assertEqual(len(crop), len(labels), len(fnames))

    def test_swav_collate_init(self):
        swav_collate = SwaVCollateFunction()

    def test_swav_collate_init_fail(self):
        # Mismatched crop_sizes / crop_counts lengths must be rejected.
        with self.assertRaises(ValueError):
            SwaVCollateFunction(
                crop_sizes=[1],
                crop_counts=[2, 3],
            )

    def test_multi_view_collate(self):
        """Each transform produces one view; view order matches transform order."""
        to_tensor = transforms.ToTensor()
        hflip = transforms.Compose(
            [
                transforms.RandomHorizontalFlip(p=1),
                to_tensor,
            ]
        )
        vflip = transforms.Compose(
            [
                transforms.RandomVerticalFlip(p=1),
                to_tensor,
            ]
        )
        trans = [to_tensor, hflip, vflip]
        collate_fn = MultiViewCollateFunction(trans)
        batch = self.create_batch()
        imgs = batch[0]
        views, labels, fnames = collate_fn(batch)
        self.assertEqual(len(labels), len(batch))
        self.assertEqual(len(fnames), len(batch))
        # Deterministic transforms (p=1) allow exact comparison per view.
        self.assertTrue(torch.equal(views[0][0], to_tensor(imgs[0])))
        self.assertTrue(torch.equal(views[1][0], hflip(imgs[0])))
        self.assertTrue(torch.equal(views[2][0], vflip(imgs[0])))

    def test_dino_collate_init(self):
        DINOCollateFunction()

    def test_dino_collate_forward(self):
        batch = self.create_batch()
        collate_fn = DINOCollateFunction()
        views, labels, fnames = collate_fn(batch)

    def test_mae_collate_init(self):
        MAECollateFunction()

    def test_mae_collate_forward(self):
        batch = self.create_batch()
        collate_fn = MAECollateFunction()
        views, labels, fnames = collate_fn(batch)

    def test_pirl_collate_init(self):
        PIRLCollateFunction()

    def test_pirl_collate_forward_tuple_input_size(self):
        batch = self.create_batch()
        img_collate = PIRLCollateFunction(
            input_size=(32, 32),
        )
        samples, labels, fnames = img_collate(batch)
        samples0, samples1 = samples
        self.assertIsNotNone(img_collate)
        self.assertEqual(len(samples0), len(samples1))
        self.assertEqual(len(samples1), len(labels), len(fnames))

    def test_pirl_collate_forward_n_grid(self):
        # With n_grid=3 the second view is a 3x3=9 patch jigsaw grid.
        batch = self.create_batch()
        img_collate = PIRLCollateFunction(input_size=32, n_grid=3)
        samples, labels, fnames = img_collate(batch)
        samples0, samples1 = samples
        self.assertIsNotNone(img_collate)
        self.assertEqual(len(samples0), len(samples1))
        self.assertEqual(len(samples1), len(labels), len(fnames))
        self.assertEqual(samples1.shape, (16, 9, 3, 10, 10))

    def test_msn_collate_init(self):
        MSNCollateFunction()

    def test_msn_collate_forward(self):
        batch = self.create_batch()
        img_collate = MSNCollateFunction(
            random_size=24, focal_size=12, random_views=2, focal_views=10
        )
        views, labels, fnames = img_collate(batch)
        self.assertEqual(len(views), 2 + 10)
        self.assertEqual(len(labels), len(batch))
        self.assertEqual(len(fnames), len(batch))
        # First come the (larger) random views, then the focal views.
        for view in views[:2]:
            self.assertEqual(view.shape, (16, 3, 24, 24))
        for view in views[2:]:
            self.assertEqual(view.shape, (16, 3, 12, 12))

    def test_vicreg_collate_init(self):
        VICRegCollateFunction()

    def test_vicreg_collate_forward(self):
        batch = self.create_batch()
        collate_fn = VICRegCollateFunction()
        views, labels, fnames = collate_fn(batch)

    def test_vicregl_collate_init(self):
        VICRegLCollateFunction()

    def test_vicregl_collate_forward(self):
        batch = self.create_batch()
        collate_fn = VICRegLCollateFunction()
        views, labels, fnames = collate_fn(batch)
| 8,511 | 34.028807 | 81 | py |
lightly | lightly-master/tests/data/test_multi_view_collate.py | from typing import List, Tuple, Union
from warnings import warn
import torch
from torch import Tensor
from lightly.data.multi_view_collate import MultiViewCollate
def test_empty_batch():
    """Collating an empty batch yields empty views, labels, and filenames."""
    collate_fn = MultiViewCollate()
    views, labels, fnames = collate_fn([])
    for result in (views, labels, fnames):
        assert len(result) == 0
def test_single_item_batch():
    """A one-item batch produces one stacked tensor of batch size 1 per view."""
    collate_fn = MultiViewCollate()
    item_views = [torch.randn(3, 224, 224) for _ in range(5)]
    views, labels, fnames = collate_fn([(item_views, 1, "image1.jpg")])
    assert len(views) == 5
    assert views[0].shape == (1, 3, 224, 224)
    assert torch.equal(labels, torch.tensor([1]))
    assert fnames == ["image1.jpg"]
def test_multiple_item_batch():
    """Two items with 5 views each collate into 5 tensors of batch size 2."""
    collate_fn = MultiViewCollate()
    batch = [
        ([torch.randn(3, 224, 224) for _ in range(5)], label, fname)
        for label, fname in ((1, "image1.jpg"), (2, "image2.jpg"))
    ]
    views, labels, fnames = collate_fn(batch)
    assert len(views) == 5
    assert views[0].shape == (2, 3, 224, 224)
    assert torch.equal(labels, torch.tensor([1, 2], dtype=torch.long))
    assert fnames == ["image1.jpg", "image2.jpg"]
| 1,391 | 29.26087 | 70 | py |
lightly | lightly-master/tests/embedding/test_callbacks.py | import pytest
from omegaconf import OmegaConf
from lightly.embedding import callbacks
def test_create_summary_callback():
    """The max_depth from the summary callback config is passed through."""
    callback = callbacks.create_summary_callback(
        summary_callback_config=OmegaConf.create({"max_depth": 99}),
        trainer_config=OmegaConf.create(),
    )
    assert callback._max_depth == 99
def test_create_summary_callback__weights_summary():
    """The legacy "weights_summary" trainer option takes precedence when set."""
    # (weights_summary value, expected max_depth): "top"/"full" override the
    # callback config, while None / "None" fall back to it.
    cases = [
        ("top", 1),
        ("full", -1),
        (None, 99),
        ("None", 99),
    ]
    for weights_summary, expected_max_depth in cases:
        callback = callbacks.create_summary_callback(
            summary_callback_config=OmegaConf.create({"max_depth": 99}),
            trainer_config=OmegaConf.create({"weights_summary": weights_summary}),
        )
        assert callback._max_depth == expected_max_depth

    # Any other value is rejected.
    with pytest.raises(ValueError):
        callbacks.create_summary_callback(
            summary_callback_config=OmegaConf.create(),
            trainer_config=OmegaConf.create({"weights_summary": "invalid"}),
        )
| 1,689 | 34.957447 | 76 | py |
lightly | lightly-master/tests/embedding/test_embedding.py | import os
import tempfile
import unittest
from typing import List, Tuple
import numpy as np
import torch
import torchvision
from hydra.experimental import compose, initialize
from torch import manual_seed
from torch.utils.data import DataLoader
from lightly.cli._helpers import get_model_from_config
from lightly.data import LightlyDataset
class TestLightlyDataset(unittest.TestCase):
    """Tests that embedding a dataset yields results in a stable order.

    NOTE(review): the class name suggests dataset tests, but it actually tests
    ``embed``; consider renaming in a follow-up.
    """

    def setUp(self):
        # Create a small flat image folder and a minimal CLI config pointing
        # at it; trainer.max_epochs=0 keeps the model untrained (fast).
        self.folder_path, self.sample_names = self.create_dataset_no_subdir(10)
        with initialize(config_path="../../lightly/cli/config", job_name="test_app"):
            self.cfg = compose(
                config_name="config",
                overrides=[
                    'token="123"',
                    f"input_dir={self.folder_path}",
                    "trainer.max_epochs=0",
                ],
            )

    def tearDown(self):
        # Fix: the temporary dataset directory was previously never removed,
        # leaking one directory with 10 images per test run.
        import shutil

        shutil.rmtree(self.folder_path, ignore_errors=True)

    def create_dataset_no_subdir(self, n_samples: int) -> Tuple[str, List[str]]:
        """Writes ``n_samples`` fake JPGs into a fresh temporary directory.

        Returns:
            Tuple of (directory path, list of written file names).
        """
        dataset = torchvision.datasets.FakeData(size=n_samples, image_size=(3, 32, 32))
        tmp_dir = tempfile.mkdtemp()
        sample_names = [f"img_{i}.jpg" for i in range(n_samples)]
        for sample_idx in range(n_samples):
            data = dataset[sample_idx]
            path = os.path.join(tmp_dir, sample_names[sample_idx])
            data[0].save(path)
        return tmp_dir, sample_names

    def test_embed_correct_order(self):
        """Embeddings, labels and filenames must not depend on num_workers."""
        transform = torchvision.transforms.ToTensor()
        dataset = LightlyDataset(self.folder_path, transform=transform)
        encoder = get_model_from_config(self.cfg)
        if torch.cuda.is_available():
            device = torch.device("cuda")
        else:
            device = torch.device("cpu")

        manual_seed(42)
        dataloader_1_worker = DataLoader(
            dataset, shuffle=True, num_workers=0, batch_size=4
        )
        embeddings_1_worker, labels_1_worker, filenames_1_worker = encoder.embed(
            dataloader_1_worker,
            device=device,
        )

        manual_seed(43)
        dataloader_4_worker = DataLoader(
            dataset, shuffle=True, num_workers=4, batch_size=4
        )
        embeddings_4_worker, labels_4_worker, filenames_4_worker = encoder.embed(
            dataloader_4_worker,
            device=device,
        )

        # Results must match independently of dataloader configuration and
        # must follow the dataset's filename order.
        np.testing.assert_allclose(embeddings_1_worker, embeddings_4_worker, rtol=5e-5)
        np.testing.assert_allclose(labels_1_worker, labels_4_worker, rtol=1e-5)
        self.assertListEqual(filenames_1_worker, filenames_4_worker)
        self.assertListEqual(filenames_1_worker, dataset.get_filenames())
| 2,614 | 34.337838 | 87 | py |
lightly | lightly-master/tests/loss/test_CO2Regularizer.py | import unittest
import torch
from lightly.loss.regularizer import CO2Regularizer
class TestCO2Regularizer(unittest.TestCase):
    """Forward-pass tests for the CO2 regularizer, with and without memory bank."""

    def test_forward_pass_no_memory_bank(self):
        reg = CO2Regularizer(memory_bank_size=0)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 32))
            batch_2 = torch.randn((bsz, 32))

            # symmetry: without a memory bank the regularizer must not depend
            # on the order of its two inputs
            l1 = reg(batch_1, batch_2)
            l2 = reg(batch_2, batch_1)
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.0)

    def test_forward_pass_memory_bank(self):
        reg = CO2Regularizer(memory_bank_size=4096)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 32))
            batch_2 = torch.randn((bsz, 32))
            # with a memory bank only positivity is checked (the bank is
            # updated between calls, so symmetry does not hold)
            l1 = reg(batch_1, batch_2)
            self.assertGreater(l1.item(), 0)

    def test_forward_pass_cuda_no_memory_bank(self):
        # silently skipped when no GPU is available
        if not torch.cuda.is_available():
            return

        reg = CO2Regularizer(memory_bank_size=0)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 32)).cuda()
            batch_2 = torch.randn((bsz, 32)).cuda()

            # symmetry
            l1 = reg(batch_1, batch_2)
            l2 = reg(batch_2, batch_1)
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.0)

    def test_forward_pass_cuda_memory_bank(self):
        # silently skipped when no GPU is available
        if not torch.cuda.is_available():
            return

        reg = CO2Regularizer(memory_bank_size=4096)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 32)).cuda()
            batch_2 = torch.randn((bsz, 32)).cuda()

            # positivity only (the previous "# symmetry" comment was wrong:
            # no symmetry is checked here)
            l1 = reg(batch_1, batch_2)
            self.assertGreater(l1.cpu().item(), 0)
| 1,708 | 30.072727 | 64 | py |
lightly | lightly-master/tests/loss/test_DCLLoss.py | import unittest
from unittest.mock import patch
import pytest
import torch
from pytest_mock import MockerFixture
from torch import distributed as dist
from lightly.loss.dcl_loss import DCLLoss, DCLWLoss, negative_mises_fisher_weights
class TestDCLLoss:
    """Constructor behavior of DCLLoss with respect to gather_distributed."""

    def test__gather_distributed(self, mocker: MockerFixture) -> None:
        # With torch.distributed available, construction succeeds and
        # dist.is_available is consulted exactly once.
        patched = mocker.patch.object(dist, "is_available", return_value=True)
        DCLLoss(gather_distributed=True)
        patched.assert_called_once()

    def test__gather_distributed_dist_not_available(
        self, mocker: MockerFixture
    ) -> None:
        # Without torch.distributed, requesting gathering must fail fast.
        patched = mocker.patch.object(dist, "is_available", return_value=False)
        with pytest.raises(ValueError):
            DCLLoss(gather_distributed=True)
        patched.assert_called_once()
class TestDCLUnitTest(unittest.TestCase):
    # Old tests in unittest style, please add new tests to TestDCLLoss using pytest.

    def test_negative_mises_fisher_weights(self, seed=0):
        # Smoke test: the weighting function must run for very small,
        # moderate, and very large sigma values.
        torch.manual_seed(seed)
        out0 = torch.rand((3, 5))
        out1 = torch.rand((3, 5))
        for sigma in [0.0000001, 0.5, 10000]:
            with self.subTest(sigma=sigma):
                negative_mises_fisher_weights(out0, out1, sigma)

    def test_dclloss_forward(self, seed=0):
        # The loss must be positive and symmetric in its two inputs for all
        # combinations of batch size, dimension, temperature, gathering mode,
        # and weighting function.
        torch.manual_seed(seed=seed)
        for batch_size in [2, 3]:
            for dim in [1, 3]:
                out0 = torch.rand((batch_size, dim))
                out1 = torch.rand((batch_size, dim))
                for temperature in [0.1, 0.5, 1.0]:
                    for gather_distributed in [False, True]:
                        for weight_fn in [None, negative_mises_fisher_weights]:
                            with self.subTest(
                                batch_size=batch_size,
                                dim=dim,
                                temperature=temperature,
                                gather_distributed=gather_distributed,
                                weight_fn=weight_fn,
                            ):
                                criterion = DCLLoss(
                                    temperature=temperature,
                                    gather_distributed=gather_distributed,
                                    weight_fn=weight_fn,
                                )
                                loss0 = criterion(out0, out1)
                                loss1 = criterion(out1, out0)
                                self.assertGreater(loss0, 0)
                                self.assertAlmostEqual(loss0, loss1)

    def test_dclloss_backprop(self, seed=0):
        # Gradients must flow from the loss back into a preceding layer.
        torch.manual_seed(seed=seed)
        out0 = torch.rand(3, 5)
        out1 = torch.rand(3, 5)
        layer = torch.nn.Linear(5, 5)
        out0 = layer(out0)
        out1 = layer(out1)
        criterion = DCLLoss()
        optimizer = torch.optim.SGD(layer.parameters(), lr=0.1)
        loss = criterion(out0, out1)
        loss.backward()
        optimizer.step()

    def test_dclwloss_forward(self, seed=0):
        # DCLW (weighted variant) must also be positive and symmetric.
        torch.manual_seed(seed=seed)
        out0 = torch.rand(3, 5)
        out1 = torch.rand(3, 5)
        criterion = DCLWLoss()
        loss0 = criterion(out0, out1)
        loss1 = criterion(out1, out0)
        self.assertGreater(loss0, 0)
        self.assertAlmostEqual(loss0, loss1)
| 3,386 | 37.931034 | 88 | py |
lightly | lightly-master/tests/loss/test_DINOLoss.py | import copy
import itertools
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from lightly.loss import DINOLoss
from lightly.models.utils import deactivate_requires_grad
class OriginalDINOLoss(nn.Module):
    """Copy paste from the original DINO paper. We use this to verify our
    implementation.

    The only change from the original code is that distributed training is no
    longer assumed.

    NOTE: kept deliberately verbatim as a reference implementation — do not
    "improve" or restyle this class.

    Source: https://github.com/facebookresearch/dino/blob/cb711401860da580817918b9167ed73e3eef3dcf/main_dino.py#L363
    """

    def __init__(
        self,
        out_dim,
        ncrops,
        warmup_teacher_temp,
        teacher_temp,
        warmup_teacher_temp_epochs,
        nepochs,
        student_temp=0.1,
        center_momentum=0.9,
    ):
        super().__init__()
        self.student_temp = student_temp
        self.center_momentum = center_momentum
        self.ncrops = ncrops
        self.register_buffer("center", torch.zeros(1, out_dim))
        # we apply a warm up for the teacher temperature because
        # a too high temperature makes the training instable at the beginning
        self.teacher_temp_schedule = np.concatenate(
            (
                np.linspace(
                    warmup_teacher_temp, teacher_temp, warmup_teacher_temp_epochs
                ),
                np.ones(nepochs - warmup_teacher_temp_epochs) * teacher_temp,
            )
        )

    def forward(self, student_output, teacher_output, epoch):
        """
        Cross-entropy between softmax outputs of the teacher and student networks.
        """
        student_out = student_output / self.student_temp
        student_out = student_out.chunk(self.ncrops)

        # teacher centering and sharpening
        temp = self.teacher_temp_schedule[epoch]
        teacher_out = F.softmax((teacher_output - self.center) / temp, dim=-1)
        # 2 chunks: the teacher only sees the two global views
        teacher_out = teacher_out.detach().chunk(2)

        total_loss = 0
        n_loss_terms = 0
        for iq, q in enumerate(teacher_out):
            for v in range(len(student_out)):
                if v == iq:
                    # we skip cases where student and teacher operate on the same view
                    continue
                s_out = F.log_softmax(student_out[v], dim=-1)
                loss = torch.sum(-q * s_out, dim=-1)
                total_loss += loss.mean()
                n_loss_terms += 1
        total_loss /= n_loss_terms
        self.update_center(teacher_output)
        return total_loss

    @torch.no_grad()
    def update_center(self, teacher_output):
        """
        Update center used for teacher output.
        """
        batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
        batch_center = batch_center / len(teacher_output)

        # ema update
        self.center = self.center * self.center_momentum + batch_center * (
            1 - self.center_momentum
        )
class TestDINOLoss(unittest.TestCase):
    """Verifies lightly's DINOLoss against OriginalDINOLoss above: the loss
    values, the EMA center, and the gradients applied to a dummy network must
    all agree."""

    def generate_output(self, batch_size=2, n_views=3, output_dim=4, seed=0):
        """Returns a list of view representations.

        Example output:
        [
            torch.Tensor([img0_view0, img1_view0]),
            torch.Tensor([img0_view1, img1_view1])
        ]
        """
        torch.manual_seed(seed)
        out = []
        for _ in range(n_views):
            views = [torch.rand(output_dim) for _ in range(batch_size)]
            out.append(torch.stack(views))
        return out

    def test_dino_loss_equal_to_original(self):
        def test(
            batch_size=3,
            n_global=2,  # number of global views
            n_local=6,  # number of local views
            output_dim=4,
            warmup_teacher_temp=0.04,
            teacher_temp=0.04,
            warmup_teacher_temp_epochs=30,
            student_temp=0.1,
            center_momentum=0.9,
            epoch=0,
            n_epochs=100,
        ):
            """Runs test with the given input parameters."""
            with self.subTest(
                f"batch_size={batch_size}, n_global={n_global}, "
                f"n_local={n_local}, output_dim={output_dim}, "
                f"warmup_teacher_temp={warmup_teacher_temp}, "
                f"teacher_temp={teacher_temp}, "
                f"warmup_teacher_temp_epochs={warmup_teacher_temp_epochs}, "
                f"student_temp={student_temp}, "
                f"center_momentum={center_momentum}, epoch={epoch}, "
                f"n_epochs={n_epochs}"
            ):
                loss_fn = DINOLoss(
                    output_dim=output_dim,
                    warmup_teacher_temp=warmup_teacher_temp,
                    teacher_temp=teacher_temp,
                    warmup_teacher_temp_epochs=warmup_teacher_temp_epochs,
                    student_temp=student_temp,
                    center_momentum=center_momentum,
                )
                orig_loss_fn = OriginalDINOLoss(
                    out_dim=output_dim,
                    ncrops=n_global + n_local,
                    teacher_temp=teacher_temp,
                    warmup_teacher_temp=warmup_teacher_temp,
                    warmup_teacher_temp_epochs=warmup_teacher_temp_epochs,
                    nepochs=n_epochs,
                    student_temp=student_temp,
                    center_momentum=center_momentum,
                )

                # Create dummy single layer network. We use this to verify
                # that the gradient backprop works properly.
                teacher = torch.nn.Linear(output_dim, output_dim)
                deactivate_requires_grad(teacher)
                student = torch.nn.Linear(output_dim, output_dim)
                # identical copies for the reference implementation
                orig_teacher = copy.deepcopy(teacher)
                orig_student = copy.deepcopy(student)

                optimizer = torch.optim.SGD(student.parameters(), lr=1)
                orig_optimizer = torch.optim.SGD(orig_student.parameters(), lr=1)

                # Create fake output
                teacher_out = self.generate_output(
                    batch_size=batch_size,
                    n_views=n_global,
                    output_dim=output_dim,
                    seed=0,
                )
                student_out = self.generate_output(
                    batch_size=batch_size,
                    n_views=n_global + n_local,
                    output_dim=output_dim,
                    seed=1,
                )

                # Clone input tensors (the original loss takes the views
                # concatenated into a single tensor)
                orig_teacher_out = torch.cat(teacher_out)
                orig_teacher_out = orig_teacher_out.detach().clone()
                orig_student_out = torch.cat(student_out)
                orig_student_out = orig_student_out.detach().clone()

                # Forward pass
                teacher_out = [teacher(view) for view in teacher_out]
                student_out = [student(view) for view in student_out]
                orig_teacher_out = orig_teacher(orig_teacher_out)
                orig_student_out = orig_student(orig_student_out)

                # Calculate loss
                loss = loss_fn(
                    teacher_out=teacher_out,
                    student_out=student_out,
                    epoch=epoch,
                )
                orig_loss = orig_loss_fn(
                    student_output=orig_student_out,
                    teacher_output=orig_teacher_out,
                    epoch=epoch,
                )

                # Backward pass and optimizer step
                optimizer.zero_grad()
                orig_optimizer.zero_grad()
                loss.backward()
                orig_loss.backward()
                optimizer.step()
                orig_optimizer.step()

                # Loss and loss center should be equal
                center = loss_fn.center.squeeze()
                orig_center = orig_loss_fn.center.squeeze()
                self.assertTrue(torch.allclose(center, orig_center))
                self.assertTrue(torch.allclose(loss, orig_loss))

                # Parameters of network should be equal after backward pass
                for param, orig_param in zip(
                    student.parameters(), orig_student.parameters()
                ):
                    self.assertTrue(torch.allclose(param, orig_param))
                for param, orig_param in zip(
                    teacher.parameters(), orig_teacher.parameters()
                ):
                    self.assertTrue(torch.allclose(param, orig_param))

        def test_all(**kwargs):
            """Tests all combinations of the input parameters"""
            parameters = []
            for name, values in kwargs.items():
                parameters.append([(name, value) for value in values])
            # parameters = [
            #     [(param1, val11), (param1, val12), ..],
            #     [(param2, val21), (param2, val22), ..],
            #     ...
            # ]
            for params in itertools.product(*parameters):
                # params = [(param1, value1), (param2, value2), ...]
                test(**dict(params))

        # test input sizes
        test_all(
            batch_size=np.arange(1, 4),
            n_local=np.arange(0, 4),
            output_dim=np.arange(1, 4),
        )

        # test teacher temp warmup
        test_all(
            warmup_teacher_temp=[0.01, 0.04, 0.07],
            teacher_temp=[0.01, 0.04, 0.07],
            warmup_teacher_temp_epochs=[0, 1, 10],
            epoch=[0, 1, 10, 20],
        )

        # test other params
        test_all(
            student_temp=[0.05, 0.1, 0.2],
            center_momentum=[0.5, 0.9, 0.95],
        )
| 9,694 | 36.145594 | 116 | py |
lightly | lightly-master/tests/loss/test_HyperSphere.py | import unittest
import torch
from lightly.loss.hypersphere_loss import HypersphereLoss
class TestHyperSphereLoss(unittest.TestCase):
    """Symmetry check for the hypersphere (alignment/uniformity) loss."""

    def test_forward_pass(self):
        criterion = HypersphereLoss()
        # Batch size 1 is skipped on purpose: it is not meaningful for this
        # loss and produces NaN values.
        for batch_size in range(2, 20):
            x = torch.randn((batch_size, 32))
            y = torch.randn((batch_size, 32))

            # the loss must be symmetric in its two inputs
            forward = criterion(x, y)
            swapped = criterion(y, x)
            self.assertAlmostEqual((forward - swapped).pow(2).item(), 0.0)
| 595 | 27.380952 | 98 | py |
lightly | lightly-master/tests/loss/test_MSNLoss.py | import unittest
from unittest import TestCase
import pytest
import torch
import torch.nn.functional as F
from pytest_mock import MockerFixture
from torch import distributed as dist
from torch import nn
from torch.optim import SGD
from lightly.loss import msn_loss
from lightly.loss.msn_loss import MSNLoss
from lightly.models.modules.heads import MSNProjectionHead
class TestMSNLoss:
    """Constructor behavior of MSNLoss with respect to gather_distributed."""

    def test__gather_distributed(self, mocker: MockerFixture) -> None:
        # With torch.distributed available, construction succeeds and
        # dist.is_available is consulted exactly once.
        patched = mocker.patch.object(dist, "is_available", return_value=True)
        MSNLoss(gather_distributed=True)
        patched.assert_called_once()

    def test__gather_distributed_dist_not_available(
        self, mocker: MockerFixture
    ) -> None:
        # Without torch.distributed, requesting gathering must fail fast.
        patched = mocker.patch.object(dist, "is_available", return_value=False)
        with pytest.raises(ValueError):
            MSNLoss(gather_distributed=True)
        patched.assert_called_once()
class TestMSNLossUnitTest(TestCase):
    # Old tests in unittest style, please add new tests to TestMSNLoss using pytest.

    def test__init__temperature(self) -> None:
        # temperature must be strictly positive
        MSNLoss(temperature=1.0)
        with self.assertRaises(ValueError):
            MSNLoss(temperature=0.0)
        with self.assertRaises(ValueError):
            MSNLoss(temperature=-1.0)

    def test__init__sinkhorn_iterations(self) -> None:
        # zero iterations is allowed, negative is not
        MSNLoss(sinkhorn_iterations=0)
        with self.assertRaises(ValueError):
            MSNLoss(sinkhorn_iterations=-1)

    def test__init__me_max_weight(self) -> None:
        # the deprecated me_max_weight argument maps onto regularization_weight
        criterion = MSNLoss(regularization_weight=0.0, me_max_weight=0.5)
        assert criterion.regularization_weight == 0.5

    def test_prototype_probabilitiy(self) -> None:
        torch.manual_seed(0)
        queries = F.normalize(torch.rand((8, 10)), dim=1)
        prototypes = F.normalize(torch.rand((4, 10)), dim=1)
        prob = msn_loss.prototype_probabilities(queries, prototypes, temperature=0.5)
        self.assertEqual(prob.shape, (8, 4))
        self.assertLessEqual(prob.max(), 1.0)
        self.assertGreater(prob.min(), 0.0)

        # verify sharpening
        prob1 = msn_loss.prototype_probabilities(queries, prototypes, temperature=0.1)
        # same prototypes should be assigned regardless of temperature
        self.assertTrue(torch.all(prob.argmax(dim=1) == prob1.argmax(dim=1)))
        # probabilities of selected prototypes should be higher for lower temperature
        self.assertTrue(torch.all(prob.max(dim=1)[0] < prob1.max(dim=1)[0]))

    def test_sharpen(self) -> None:
        torch.manual_seed(0)
        prob = torch.rand((8, 10))
        p0 = msn_loss.sharpen(prob, temperature=0.5)
        p1 = msn_loss.sharpen(prob, temperature=0.1)
        # indices of max probabilities should be the same regardless of temperature
        self.assertTrue(torch.all(p0.argmax(dim=1) == p1.argmax(dim=1)))
        # max probabilities should be higher for lower temperature
        self.assertTrue(torch.all(p0.max(dim=1)[0] < p1.max(dim=1)[0]))

    def test_sinkhorn(self) -> None:
        # with iterations the input distribution must be modified
        torch.manual_seed(0)
        prob = torch.rand((8, 10))
        out = msn_loss.sinkhorn(prob)
        self.assertTrue(torch.all(prob != out))

    def test_sinkhorn_no_iter(self) -> None:
        # zero iterations must be a no-op
        torch.manual_seed(0)
        prob = torch.rand((8, 10))
        out = msn_loss.sinkhorn(prob, iterations=0)
        self.assertTrue(torch.all(prob == out))

    def test_forward(self) -> None:
        # forward must work for any number of anchor views per target
        torch.manual_seed(0)
        for num_target_views in range(1, 4):
            with self.subTest(num_views=num_target_views):
                criterion = MSNLoss()
                anchors = torch.rand((8 * num_target_views, 10))
                targets = torch.rand((8, 10))
                prototypes = torch.rand((4, 10), requires_grad=True)
                criterion(anchors, targets, prototypes)

    @unittest.skipUnless(torch.cuda.is_available(), "cuda not available")
    def test_forward_cuda(self) -> None:
        torch.manual_seed(0)
        criterion = MSNLoss()
        anchors = torch.rand((8 * 2, 10)).cuda()
        targets = torch.rand((8, 10)).cuda()
        prototypes = torch.rand((4, 10), requires_grad=True).cuda()
        criterion(anchors, targets, prototypes)

    def test_backward(self) -> None:
        # an optimizer step driven by the loss must change the head weights
        torch.manual_seed(0)
        head = MSNProjectionHead(5, 16, 6)
        criterion = MSNLoss()
        optimizer = SGD(head.parameters(), lr=0.1)
        anchors = torch.rand((8 * 4, 5))
        targets = torch.rand((8, 5))
        prototypes = nn.Linear(6, 4).weight  # 4 prototypes with dim 6
        optimizer.zero_grad()
        anchors = head(anchors)
        with torch.no_grad():
            targets = head(targets)
        loss = criterion(anchors, targets, prototypes)
        loss.backward()
        weights_before = head.layers[0].weight.data.clone()
        optimizer.step()
        weights_after = head.layers[0].weight.data
        # backward pass should update weights
        self.assertTrue(torch.any(weights_before != weights_after))

    @unittest.skipUnless(torch.cuda.is_available(), "cuda not available")
    def test_backward_cuda(self) -> None:
        torch.manual_seed(0)
        head = MSNProjectionHead(5, 16, 6)
        head.to("cuda")
        criterion = MSNLoss()
        optimizer = SGD(head.parameters(), lr=0.1)
        anchors = torch.rand((8 * 4, 5)).cuda()
        targets = torch.rand((8, 5)).cuda()
        prototypes = nn.Linear(6, 4).weight.cuda()  # 4 prototypes with dim 6
        optimizer.zero_grad()
        anchors = head(anchors)
        with torch.no_grad():
            targets = head(targets)
        loss = criterion(anchors, targets, prototypes)
        loss.backward()
        weights_before = head.layers[0].weight.data.clone()
        optimizer.step()
        weights_after = head.layers[0].weight.data
        # backward pass should update weights
        self.assertTrue(torch.any(weights_before != weights_after))
| 6,010 | 39.073333 | 88 | py |
lightly | lightly-master/tests/loss/test_MemoryBank.py | import unittest
import torch
from lightly.loss.memory_bank import MemoryBankModule
class TestNTXentLoss(unittest.TestCase):
    # NOTE(review): despite its name this class tests MemoryBankModule, not
    # NTXentLoss — presumably copied from the NTXent test file; consider
    # renaming to TestMemoryBankModule.

    def test_init__negative_size(self):
        with self.assertRaises(ValueError):
            MemoryBankModule(size=-1)

    def test_forward_easy(self):
        bsz = 3
        dim, size = 2, 9
        n = 33 * bsz
        memory_bank = MemoryBankModule(size=size)

        ptr = 0
        for i in range(0, n, bsz):
            output = torch.randn(2 * bsz, dim)
            output.requires_grad = True
            out0, out1 = output[:bsz], output[bsz:]

            _, curr_memory_bank = memory_bank(out1, update=True)
            next_memory_bank = memory_bank.bank

            curr_diff = out0.T - curr_memory_bank[:, ptr : ptr + bsz]
            next_diff = out1.T - next_memory_bank[:, ptr : ptr + bsz]

            # the current memory bank should not hold the batch yet
            self.assertGreater(curr_diff.norm(), 1e-5)
            # the "next" memory bank should hold the batch
            self.assertGreater(1e-5, next_diff.norm())

            ptr = (ptr + bsz) % size

    def test_forward(self):
        bsz = 3
        dim, size = 2, 10
        n = 33 * bsz
        memory_bank = MemoryBankModule(size=size)

        for i in range(0, n, bsz):
            # see if there are any problems when the bank size
            # is no multiple of the batch size
            output = torch.randn(bsz, dim)
            _, _ = memory_bank(output)

    @unittest.skipUnless(torch.cuda.is_available(), "cuda not available")
    def test_forward__cuda(self):
        bsz = 3
        dim, size = 2, 10
        n = 33 * bsz
        memory_bank = MemoryBankModule(size=size)
        device = torch.device("cuda")
        memory_bank.to(device=device)

        for i in range(0, n, bsz):
            # see if there are any problems when the bank size
            # is no multiple of the batch size
            output = torch.randn(bsz, dim, device=device)
            _, _ = memory_bank(output)
| 2,029 | 30.71875 | 73 | py |
lightly | lightly-master/tests/loss/test_NTXentLoss.py | import unittest
import numpy as np
import pytest
import torch
from pytest_mock import MockerFixture
from torch import distributed as dist
from lightly.loss import NTXentLoss
class TestNTXentLoss:
    """Constructor behavior of NTXentLoss with respect to gather_distributed."""

    def test__gather_distributed(self, mocker: MockerFixture) -> None:
        # With torch.distributed available, construction succeeds and
        # dist.is_available is consulted exactly once.
        patched = mocker.patch.object(dist, "is_available", return_value=True)
        NTXentLoss(gather_distributed=True)
        patched.assert_called_once()

    def test__gather_distributed_dist_not_available(
        self, mocker: MockerFixture
    ) -> None:
        # Without torch.distributed, requesting gathering must fail fast.
        patched = mocker.patch.object(dist, "is_available", return_value=False)
        with pytest.raises(ValueError):
            NTXentLoss(gather_distributed=True)
        patched.assert_called_once()
class TestNTXentLossUnitTest(unittest.TestCase):
    # Old tests in unittest style, please add new tests to TestNTXentLoss using pytest.

    def test_with_values(self):
        """Compares NTXentLoss against a direct re-implementation of the
        formula for several batch sizes, dimensions and temperatures."""
        for n_samples in [1, 2, 4]:
            for dimension in [1, 2, 16, 64]:
                for temperature in [0.1, 1, 10]:
                    for gather_distributed in [False, True]:
                        out0 = np.random.normal(0, 1, size=(n_samples, dimension))
                        out1 = np.random.normal(0, 1, size=(n_samples, dimension))
                        with self.subTest(
                            msg=(
                                f"out0.shape={out0.shape}, temperature={temperature}, "
                                f"gather_distributed={gather_distributed}"
                            )
                        ):
                            out0 = torch.FloatTensor(out0)
                            out1 = torch.FloatTensor(out1)

                            loss_function = NTXentLoss(
                                temperature=temperature,
                                gather_distributed=gather_distributed,
                            )
                            l1 = float(loss_function(out0, out1))
                            l2 = float(loss_function(out1, out0))
                            l1_manual = self.calc_ntxent_loss_manual(
                                out0, out1, temperature=temperature
                            )
                            # Swap the views so this mirrors l2; previously the
                            # call repeated the l1 argument order (harmless only
                            # because the manual loss is symmetric).
                            l2_manual = self.calc_ntxent_loss_manual(
                                out1, out0, temperature=temperature
                            )
                            self.assertAlmostEqual(l1, l2, places=5)
                            self.assertAlmostEqual(l1, l1_manual, places=5)
                            self.assertAlmostEqual(l2, l2_manual, places=5)

    def calc_ntxent_loss_manual(self, out0, out1, temperature: float) -> float:
        """Reference NT-Xent loss value.

        Implements the pseudocode of Algorithm 1 in
        https://arxiv.org/pdf/2002.05709.pdf directly (no vectorization).
        """
        out0 = np.array(out0)
        out1 = np.array(out1)

        N = len(out0)
        z = np.concatenate([out0, out1], axis=0)

        # different to the notation in the paper, in our case z[k] and z[k+N]
        # are different augmentations of the same image
        s_i_j = np.zeros((2 * len(out0), 2 * len(out1)))
        for i in range(2 * N):
            for j in range(2 * N):
                sim = np.inner(z[i], z[j]) / (
                    np.linalg.norm(z[i]) * np.linalg.norm(z[j])
                )
                s_i_j[i, j] = sim

        exponential_i_j = np.exp(s_i_j / temperature)

        # Pairwise loss terms; the denominator excludes the self-similarity.
        l_i_j = np.zeros_like(exponential_i_j)
        for i in range(2 * N):
            for j in range(2 * N):
                nominator = exponential_i_j[i, j]
                denominator = 0
                for k in range(2 * N):
                    if k != i:
                        denominator += exponential_i_j[i, k]
                l_i_j[i, j] = -1 * np.log(nominator / denominator)

        # Average over the positive pairs (k, k+N) in both directions.
        loss = 0
        for k in range(N):
            loss += l_i_j[k, k + N] + l_i_j[k + N, k]
        loss /= 2 * N
        return loss

    def test_with_correlated_embedding(self):
        """For perfectly correlated embeddings the loss value only depends on
        the number of negatives (batch or memory-bank entries)."""
        for n_samples in [1, 2, 8, 16]:
            for memory_bank_size in [0, 1, 2, 8, 15, 16, 17]:
                for temperature in [0.1, 1, 7]:
                    for gather_distributed in [False, True]:
                        out0 = np.random.random((n_samples, 1))
                        out1 = np.random.random((n_samples, 1))
                        # Second feature is twice the first, so all embeddings
                        # point in the same direction.
                        out0 = np.concatenate([out0, 2 * out0], axis=1)
                        out1 = np.concatenate([out1, 2 * out1], axis=1)
                        out0 = torch.FloatTensor(out0)
                        out1 = torch.FloatTensor(out1)
                        out0.requires_grad = True

                        with self.subTest(
                            msg=(
                                f"n_samples: {n_samples}, memory_bank_size: {memory_bank_size},"
                                f"temperature: {temperature}, gather_distributed: {gather_distributed}"
                            )
                        ):
                            loss_function = NTXentLoss(
                                temperature=temperature,
                                memory_bank_size=memory_bank_size,
                            )
                            if memory_bank_size > 0:
                                for i in range(int(memory_bank_size / n_samples) + 2):
                                    # fill the memory bank over multiple rounds
                                    loss = float(loss_function(out0, out1))
                                expected_loss = -1 * np.log(1 / (memory_bank_size + 1))
                            else:
                                loss = float(loss_function(out0, out1))
                                expected_loss = -1 * np.log(1 / (2 * n_samples - 1))
                            self.assertAlmostEqual(loss, expected_loss, places=5)

    def test_forward_pass(self):
        """The loss must be symmetric in its two input batches."""
        loss = NTXentLoss(memory_bank_size=0)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 32))
            batch_2 = torch.randn((bsz, 32))

            # symmetry
            l1 = loss(batch_1, batch_2)
            l2 = loss(batch_2, batch_1)
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.0)

    def test_forward_pass_1d(self):
        """Symmetry check for one-dimensional embeddings."""
        loss = NTXentLoss(memory_bank_size=0)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 1))
            batch_2 = torch.randn((bsz, 1))

            # symmetry
            l1 = loss(batch_1, batch_2)
            l2 = loss(batch_2, batch_1)
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.0)

    def test_forward_pass_neg_temp(self):
        """Symmetry check with a negative temperature."""
        loss = NTXentLoss(temperature=-1.0, memory_bank_size=0)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 32))
            batch_2 = torch.randn((bsz, 32))

            # symmetry
            l1 = loss(batch_1, batch_2)
            l2 = loss(batch_2, batch_1)
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.0)

    def test_forward_pass_memory_bank(self):
        """Smoke test: the forward pass with a memory bank must not raise
        for any batch size (only checks execution, not the value)."""
        loss = NTXentLoss(memory_bank_size=64)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 32))
            batch_2 = torch.randn((bsz, 32))
            loss(batch_1, batch_2)

    @unittest.skipUnless(torch.cuda.is_available(), "No cuda")
    def test_forward_pass_memory_bank_cuda(self):
        """Memory-bank smoke test on GPU tensors."""
        loss = NTXentLoss(memory_bank_size=64)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 32)).cuda()
            batch_2 = torch.randn((bsz, 32)).cuda()
            loss(batch_1, batch_2)

    @unittest.skipUnless(torch.cuda.is_available(), "No cuda")
    def test_forward_pass_cuda(self):
        """Symmetry check on GPU tensors."""
        loss = NTXentLoss(memory_bank_size=0)
        for bsz in range(1, 20):
            batch_1 = torch.randn((bsz, 32)).cuda()
            batch_2 = torch.randn((bsz, 32)).cuda()

            # symmetry
            l1 = loss(batch_1, batch_2)
            l2 = loss(batch_2, batch_1)
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.0)
| 8,054 | 40.953125 | 103 | py |
lightly | lightly-master/tests/loss/test_NegativeCosineSimilarity.py | import unittest
import torch
from lightly.loss import NegativeCosineSimilarity
class TestNegativeCosineSimilarity(unittest.TestCase):
    """Checks that NegativeCosineSimilarity is symmetric in its arguments."""

    def test_forward_pass(self):
        criterion = NegativeCosineSimilarity()
        for batch_size in range(1, 20):
            view_a = torch.randn((batch_size, 32))
            view_b = torch.randn((batch_size, 32))
            # The loss must not depend on the order of its inputs.
            forward = criterion(view_a, view_b)
            backward = criterion(view_b, view_a)
            self.assertAlmostEqual((forward - backward).pow(2).item(), 0.0)

    @unittest.skipUnless(torch.cuda.is_available(), "Cuda not available")
    def test_forward_pass_cuda(self):
        criterion = NegativeCosineSimilarity()
        for batch_size in range(1, 20):
            view_a = torch.randn((batch_size, 32)).cuda()
            view_b = torch.randn((batch_size, 32)).cuda()
            # Same symmetry check on GPU tensors.
            forward = criterion(view_a, view_b)
            backward = criterion(view_b, view_a)
            self.assertAlmostEqual((forward - backward).pow(2).item(), 0.0)
| 906 | 28.258065 | 73 | py |
lightly | lightly-master/tests/loss/test_PMSNLoss.py | import math
import unittest
import pytest
import torch
from torch import Tensor
from lightly.loss import pmsn_loss
from lightly.loss.pmsn_loss import PMSNCustomLoss, PMSNLoss
class TestPMSNLoss:
    """Tests for PMSNLoss (power-law regularized MSN loss)."""

    def test_regularization_loss(self) -> None:
        """The regularization term must equal sum(t_k * log(t_k / p_k)) where
        t is the normalized power-law target with exponent 0.25."""
        criterion = PMSNLoss()
        mean_anchor_probs = torch.Tensor([0.1, 0.3, 0.6]).log()
        # Expected target distribution: 1 / k**0.25 for k = 1..3, normalized.
        norm = 1 / (1**0.25) + 1 / (2**0.25) + 1 / (3**0.25)
        t0 = 1 / (1**0.25) / norm
        t1 = 1 / (2**0.25) / norm
        t2 = 1 / (3**0.25) / norm
        # Note: the original computed this value twice; one call suffices.
        loss = criterion.regularization_loss(mean_anchor_probs=mean_anchor_probs)
        expected_loss = (
            t0 * math.log(t0 / 0.1) + t1 * math.log(t1 / 0.3) + t2 * math.log(t2 / 0.6)
        )
        assert loss == pytest.approx(expected_loss)

    def test_forward(self) -> None:
        """Smoke test: the forward pass must run on CPU tensors."""
        torch.manual_seed(0)
        criterion = PMSNLoss()
        anchors = torch.rand((8 * 3, 10))
        targets = torch.rand((8, 10))
        prototypes = torch.rand((4, 10), requires_grad=True)
        criterion(anchors, targets, prototypes)

    @unittest.skipUnless(torch.cuda.is_available(), "cuda not available")
    def test_forward_cuda(self) -> None:
        """Smoke test: the forward pass must run on GPU tensors."""
        torch.manual_seed(0)
        criterion = PMSNLoss()
        anchors = torch.rand((8 * 3, 10)).cuda()
        targets = torch.rand((8, 10)).cuda()
        prototypes = torch.rand((4, 10), requires_grad=True).cuda()
        criterion(anchors, targets, prototypes)
class TestPMSNCustomLoss:
    """Tests for PMSNCustomLoss with a user-supplied target distribution."""

    def test_regularization_loss(self) -> None:
        criterion = PMSNCustomLoss(target_distribution=_uniform_distribution)
        mean_anchor_probs = torch.Tensor([0.1, 0.3, 0.6]).log()
        loss = criterion.regularization_loss(mean_anchor_probs=mean_anchor_probs)
        # Expected value: sum of t * log(t / p) with a uniform target t = 1/3
        # over the probabilities (0.1, 0.3, 0.6).
        target = 1 / 3
        expected_loss = target * sum(
            math.log(target / prob) for prob in (0.1, 0.3, 0.6)
        )
        assert loss == pytest.approx(expected_loss)

    def test_forward(self) -> None:
        # Smoke test: the forward pass must run on CPU tensors.
        torch.manual_seed(0)
        criterion = PMSNCustomLoss(target_distribution=_uniform_distribution)
        anchors = torch.rand((8 * 3, 10))
        targets = torch.rand((8, 10))
        prototypes = torch.rand((4, 10), requires_grad=True)
        criterion(anchors, targets, prototypes)

    @unittest.skipUnless(torch.cuda.is_available(), "cuda not available")
    def test_forward_cuda(self) -> None:
        # Smoke test: the forward pass must run on GPU tensors.
        torch.manual_seed(0)
        criterion = PMSNCustomLoss(target_distribution=_uniform_distribution)
        anchors = torch.rand((8 * 2, 10)).cuda()
        targets = torch.rand((8, 10)).cuda()
        prototypes = torch.rand((4, 10), requires_grad=True).cuda()
        criterion(anchors, targets, prototypes)
def test__power_law_distribution() -> None:
    """_power_law_distribution must return a normalized power-law tensor on
    the requested device."""
    power_dist = pmsn_loss._power_law_distribution(
        size=4, exponent=0.5, device=torch.device("cpu")
    )
    # Unnormalized weights are 1 / k**0.5 for k = 1..4; their sum is
    # 2.784457050376173, which the implementation divides out.
    expected = torch.Tensor([1 / (k**0.5) for k in range(1, 5)]) / 2.784457050376173
    assert torch.allclose(power_dist, expected)
    assert power_dist.device == torch.device("cpu")
    assert torch.allclose(power_dist.sum(), torch.Tensor([1.0]))
def _uniform_distribution(mean_anchor_probabilities: Tensor) -> Tensor:
    """Return a uniform distribution with the same length as the input tensor."""
    num_entries = mean_anchor_probabilities.shape[0]
    return mean_anchor_probabilities.new_ones(num_entries) / num_entries
| 3,635 | 34.300971 | 87 | py |
lightly | lightly-master/tests/loss/test_SwaVLoss.py | import unittest
import pytest
import torch
from pytest_mock import MockerFixture
from torch import distributed as dist
from lightly.loss import SwaVLoss
class TestNTXentLoss:
    # NOTE(review): despite its name, this class tests SwaVLoss. The name was
    # presumably copy-pasted from test_NTXentLoss.py and should likely be
    # TestSwaVLoss -- confirm before renaming.

    def test__sinkhorn_gather_distributed(self, mocker: MockerFixture) -> None:
        # With torch.distributed reported as available, constructing SwaVLoss
        # with sinkhorn_gather_distributed=True must succeed and query
        # availability exactly once.
        mock_is_available = mocker.patch.object(dist, "is_available", return_value=True)
        SwaVLoss(sinkhorn_gather_distributed=True)
        mock_is_available.assert_called_once()

    def test__sinkhorn_gather_distributed_dist_not_available(
        self, mocker: MockerFixture
    ) -> None:
        # With torch.distributed unavailable, the same construction must raise
        # a ValueError.
        mock_is_available = mocker.patch.object(
            dist, "is_available", return_value=False
        )
        with pytest.raises(ValueError):
            SwaVLoss(sinkhorn_gather_distributed=True)
        mock_is_available.assert_called_once()
class TestSwaVLossUnitTest(unittest.TestCase):
    # Old tests in unittest style, please add new tests to TestSwaVLoss using pytest.

    def test_forward_pass(self):
        """For identity-matrix inputs the loss should stay below 0.5."""
        n = 32
        n_high_res = 2
        # Use `n` instead of a hard-coded 32 so the size is defined once.
        high_res = [torch.eye(n, n) for _ in range(n_high_res)]

        for n_low_res in range(6):
            for sinkhorn_iterations in range(3):
                criterion = SwaVLoss(sinkhorn_iterations=sinkhorn_iterations)
                low_res = [torch.eye(n, n) for _ in range(n_low_res)]

                with self.subTest(
                    msg=f"n_low_res={n_low_res}, sinkhorn_iterations={sinkhorn_iterations}"
                ):
                    loss = criterion(high_res, low_res)
                    # loss should be almost zero for unit matrix
                    self.assertGreater(0.5, loss.cpu().numpy())

    def test_forward_pass_queue(self):
        """Same as test_forward_pass, with an additional queue per view."""
        n = 32
        n_high_res = 2
        high_res = [torch.eye(n, n) for _ in range(n_high_res)]
        queue_length = 128
        # Use the named sizes instead of hard-coded 128/32; `queue_length`
        # was previously defined but unused.
        queue = [torch.eye(queue_length, n) for _ in range(n_high_res)]

        for n_low_res in range(6):
            for sinkhorn_iterations in range(3):
                criterion = SwaVLoss(sinkhorn_iterations=sinkhorn_iterations)
                low_res = [torch.eye(n, n) for _ in range(n_low_res)]

                with self.subTest(
                    msg=f"n_low_res={n_low_res}, sinkhorn_iterations={sinkhorn_iterations}"
                ):
                    loss = criterion(high_res, low_res, queue)
                    # loss should be almost zero for unit matrix
                    self.assertGreater(0.5, loss.cpu().numpy())

    def test_forward_pass_bsz_1(self):
        """Smoke test with batch size one (no value check)."""
        n = 32
        n_high_res = 2
        high_res = [torch.eye(1, n) for _ in range(n_high_res)]

        for n_low_res in range(6):
            for sinkhorn_iterations in range(3):
                criterion = SwaVLoss(sinkhorn_iterations=sinkhorn_iterations)
                low_res = [torch.eye(1, n) for _ in range(n_low_res)]

                with self.subTest(
                    msg=f"n_low_res={n_low_res}, sinkhorn_iterations={sinkhorn_iterations}"
                ):
                    loss = criterion(high_res, low_res)

    def test_forward_pass_1d(self):
        """Forward pass with single-column inputs."""
        n = 32
        n_high_res = 2
        high_res = [torch.eye(n, 1) for _ in range(n_high_res)]

        for n_low_res in range(6):
            for sinkhorn_iterations in range(3):
                criterion = SwaVLoss(sinkhorn_iterations=sinkhorn_iterations)
                low_res = [torch.eye(n, 1) for _ in range(n_low_res)]

                with self.subTest(
                    msg=f"n_low_res={n_low_res}, sinkhorn_iterations={sinkhorn_iterations}"
                ):
                    loss = criterion(high_res, low_res)
                    # loss should be almost zero for unit matrix
                    self.assertGreater(0.5, loss.cpu().numpy())

    @unittest.skipUnless(torch.cuda.is_available(), "skip")
    def test_forward_pass_cuda(self):
        """Same near-zero check on GPU tensors."""
        n = 32
        n_high_res = 2
        high_res = [torch.eye(n, n).cuda() for _ in range(n_high_res)]

        for n_low_res in range(6):
            for sinkhorn_iterations in range(3):
                criterion = SwaVLoss(sinkhorn_iterations=sinkhorn_iterations)
                low_res = [torch.eye(n, n).cuda() for _ in range(n_low_res)]

                with self.subTest(
                    msg=f"n_low_res={n_low_res}, sinkhorn_iterations={sinkhorn_iterations}"
                ):
                    loss = criterion(high_res, low_res)
                    # loss should be almost zero for unit matrix
                    self.assertGreater(0.5, loss.cpu().numpy())
| 4,520 | 38.313043 | 91 | py |
lightly | lightly-master/tests/loss/test_SymNegCosineSimilarityLoss.py | import unittest
import torch
from lightly.loss import SymNegCosineSimilarityLoss
class TestSymNegCosineSimilarityLoss(unittest.TestCase):
    """Symmetry tests for SymNegCosineSimilarityLoss."""

    def test_forward_pass(self):
        """The loss must be symmetric in its two input tuples."""
        loss = SymNegCosineSimilarityLoss()
        for bsz in range(1, 20):
            z0 = torch.randn((bsz, 32))
            p0 = torch.randn((bsz, 32))
            z1 = torch.randn((bsz, 32))
            p1 = torch.randn((bsz, 32))

            # symmetry
            l1 = loss((z0, p0), (z1, p1))
            l2 = loss((z1, p1), (z0, p0))
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.0)

    @unittest.skipUnless(torch.cuda.is_available(), "Cuda not available")
    def test_forward_pass_cuda(self):
        """Symmetry check on GPU tensors.

        Uses skipUnless (consistent with the other loss tests) instead of an
        early `return`, so a missing GPU is reported as a skip, not a pass.
        """
        loss = SymNegCosineSimilarityLoss()
        for bsz in range(1, 20):
            z0 = torch.randn((bsz, 32)).cuda()
            p0 = torch.randn((bsz, 32)).cuda()
            z1 = torch.randn((bsz, 32)).cuda()
            p1 = torch.randn((bsz, 32)).cuda()

            # symmetry
            l1 = loss((z0, p0), (z1, p1))
            l2 = loss((z1, p1), (z0, p0))
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.0)

    def test_neg_cosine_simililarity(self):
        """The internal _neg_cosine_simililarity helper must also be symmetric."""
        loss = SymNegCosineSimilarityLoss()
        for bsz in range(1, 20):
            x = torch.randn((bsz, 32))
            y = torch.randn((bsz, 32))

            # symmetry
            l1 = loss._neg_cosine_simililarity(x, y)
            l2 = loss._neg_cosine_simililarity(y, x)
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.0)

    @unittest.skipUnless(torch.cuda.is_available(), "Cuda not available")
    def test_neg_cosine_simililarity_cuda(self):
        """Symmetry of _neg_cosine_simililarity on GPU tensors (skipped, not
        silently passed, without a GPU)."""
        loss = SymNegCosineSimilarityLoss()
        for bsz in range(1, 20):
            x = torch.randn((bsz, 32)).cuda()
            y = torch.randn((bsz, 32)).cuda()

            # symmetry
            l1 = loss._neg_cosine_simililarity(x, y)
            l2 = loss._neg_cosine_simililarity(y, x)
            self.assertAlmostEqual((l1 - l2).pow(2).item(), 0.0)
| 1,995 | 31.193548 | 64 | py |
lightly | lightly-master/tests/loss/test_TicoLoss.py | import unittest
import pytest
import torch
from pytest_mock import MockerFixture
from torch import distributed as dist
from lightly.loss.tico_loss import TiCoLoss
class TestTiCoLoss:
    """Pytest-style tests for TiCoLoss distributed-gather handling."""

    def test__gather_distributed(self, mocker: MockerFixture) -> None:
        # Pretend torch.distributed is available; construction must succeed
        # and query availability exactly once.
        patched_is_available = mocker.patch.object(
            dist, "is_available", return_value=True
        )
        TiCoLoss(gather_distributed=True)
        patched_is_available.assert_called_once()

    def test__gather_distributed_dist_not_available(
        self, mocker: MockerFixture
    ) -> None:
        # Without torch.distributed, gather_distributed=True must raise.
        patched_is_available = mocker.patch.object(
            dist, "is_available", return_value=False
        )
        with pytest.raises(ValueError):
            TiCoLoss(gather_distributed=True)
        patched_is_available.assert_called_once()
class TestTiCoLossUnitTest(unittest.TestCase):
    # Old tests in unittest style, please add new tests to TestTiCoLoss using pytest.

    def test_forward_pass(self):
        """The loss value should not depend on the order of the two views."""
        torch.manual_seed(0)
        criterion = TiCoLoss()
        for batch_size in [2, 3]:
            view_a = torch.randn((batch_size, 256))
            view_b = torch.randn((batch_size, 256))
            forward = criterion(view_a, view_b, update_covariance_matrix=False)
            backward = criterion(view_b, view_a, update_covariance_matrix=False)
            self.assertAlmostEqual((forward - backward).pow(2).item(), 0.0, 2)

    @unittest.skipUnless(torch.cuda.is_available(), "Cuda not available")
    def test_forward_pass_cuda(self):
        """Same symmetry check on GPU tensors."""
        torch.manual_seed(0)
        criterion = TiCoLoss()
        for batch_size in [2, 3]:
            view_a = torch.randn((batch_size, 256)).cuda()
            view_b = torch.randn((batch_size, 256)).cuda()
            forward = criterion(view_a, view_b, update_covariance_matrix=False)
            backward = criterion(view_b, view_a, update_covariance_matrix=False)
            self.assertAlmostEqual((forward - backward).pow(2).item(), 0.0, 2)

    def test_forward_pass__error_batch_size_1(self):
        """A batch of size one must be rejected with an AssertionError."""
        torch.manual_seed(0)
        criterion = TiCoLoss()
        view_a = torch.randn((1, 256))
        view_b = torch.randn((1, 256))
        with self.assertRaises(AssertionError):
            criterion(view_a, view_b, update_covariance_matrix=False)

    def test_forward_pass__error_different_shapes(self):
        """Views with mismatching embedding sizes must be rejected."""
        torch.manual_seed(0)
        criterion = TiCoLoss()
        view_a = torch.randn((2, 32))
        view_b = torch.randn((2, 16))
        with self.assertRaises(AssertionError):
            criterion(view_a, view_b, update_covariance_matrix=False)
| 2,421 | 33.6 | 88 | py |