Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
lightly | lightly-master/lightly/data/_helpers.py | """ Helper Functions """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import os
from typing import Any, Callable, Dict, List, Optional, Set
from torchvision import datasets
from lightly.data._image import DatasetFolder
try:
from lightly.data._video import VideoDataset
VIDEO_DATASET_AVAILABLE = True
except Exception as e:
VIDEO_DATASET_AVAILABLE = False
VIDEO_DATASET_ERRORMSG = e
IMG_EXTENSIONS = (
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
)
VIDEO_EXTENSIONS = (".mp4", ".mov", ".avi", ".mpg", ".hevc", ".m4v", ".webm", ".mpeg")
def _dir_contains_videos(root: str, extensions: tuple):
"""Checks whether directory contains video files.
Args:
root: Root directory path.
Returns:
True if root contains video files.
"""
with os.scandir(root) as scan_dir:
return any(f.name.lower().endswith(extensions) for f in scan_dir)
def _contains_videos(root: str, extensions: tuple):
    """Checks whether directory or any subdirectory contains video files.

    Iterates over all subdirectories of "root" recursively and returns True
    if any of the subdirectories contains a file with a VIDEO_EXTENSION.

    Args:
        root: Root directory path.
        extensions: Tuple of (lowercase) file extensions treated as video.

    Returns:
        True if "root" or any subdir contains video files.
    """
    # os.walk is lazy and any() short-circuits, so we stop at the first hit.
    return any(
        _dir_contains_videos(subdir, extensions) for subdir, _, _ in os.walk(root)
    )
def _is_lightly_output_dir(dirname: str):
"""Checks whether the directory is a lightly_output directory.
Args:
dirname: Directory to check.
Returns:
True if dirname is "lightly_outputs" else false.
"""
return "lightly_outputs" in dirname
def _contains_subdirs(root: str):
    """Checks whether directory contains subdirectories.

    Args:
        root: Root directory path.

    Returns:
        True if root contains subdirectories (other than lightly_outputs)
        else False.
    """
    with os.scandir(root) as entries:
        for entry in entries:
            # lightly_outputs directories are artifacts and do not count.
            if entry.is_dir() and not _is_lightly_output_dir(entry.name):
                return True
    return False
def _load_dataset_from_folder(
root: str,
transform,
is_valid_file: Optional[Callable[[str], bool]] = None,
tqdm_args: Dict[str, Any] = None,
num_workers_video_frame_counting: int = 0,
):
"""Initializes dataset from folder.
Args:
root: (str) Root directory path
transform: (torchvision.transforms.Compose) image transformations
Returns:
Dataset consisting of images/videos in the root directory.
Raises:
ValueError: If the specified dataset doesn't exist
"""
if not os.path.exists(root):
raise ValueError(f"The input directory {root} does not exist!")
# if there is a video in the input directory but we do not have
# the right dependencies, raise a ValueError
contains_videos = _contains_videos(root, VIDEO_EXTENSIONS)
if contains_videos and not VIDEO_DATASET_AVAILABLE:
raise ValueError(
f"The input directory {root} contains videos "
"but the VideoDataset is not available. \n"
"Make sure you have installed the right "
"dependencies. The error from the imported "
f"module was: {VIDEO_DATASET_ERRORMSG}"
)
if contains_videos:
# root contains videos -> create a video dataset
dataset = VideoDataset(
root,
extensions=VIDEO_EXTENSIONS,
transform=transform,
is_valid_file=is_valid_file,
tqdm_args=tqdm_args,
num_workers=num_workers_video_frame_counting,
)
elif _contains_subdirs(root):
# root contains subdirectories -> create an image folder dataset
dataset = datasets.ImageFolder(
root, transform=transform, is_valid_file=is_valid_file
)
else:
# root contains plain images -> create a folder dataset
dataset = DatasetFolder(
root,
extensions=IMG_EXTENSIONS,
transform=transform,
is_valid_file=is_valid_file,
)
return dataset
| 4,189 | 25.687898 | 86 | py |
lightly | lightly-master/lightly/data/_image.py | """ Image Dataset """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import os
from typing import List, Set, Tuple
import torchvision.datasets as datasets
from torchvision import transforms
from lightly.data._image_loaders import default_loader
def _make_dataset(
directory, extensions=None, is_valid_file=None
) -> List[Tuple[str, int]]:
"""Returns a list of all image files with targets in the directory.
Args:
directory:
Root directory path (should not contain subdirectories!).
extensions:
Tuple of valid extensions.
is_valid_file:
Used to find valid files.
Returns:
List of instance tuples: (path_i, target_i = 0).
"""
if extensions is None:
if is_valid_file is None:
ValueError("Both extensions and is_valid_file cannot be None")
else:
_is_valid_file = is_valid_file
else:
def is_valid_file_extension(filepath):
return filepath.lower().endswith(extensions)
if is_valid_file is None:
_is_valid_file = is_valid_file_extension
else:
def _is_valid_file(filepath):
return is_valid_file_extension(filepath) and is_valid_file(filepath)
instances = []
for f in os.scandir(directory):
if not _is_valid_file(f.path):
continue
# convention: the label of all images is 0, based on the fact that
# they are all in the same directory
item = (f.path, 0)
instances.append(item)
return sorted(instances, key=lambda x: x[0]) # sort by path
class DatasetFolder(datasets.VisionDataset):
    """Implements a dataset folder.

    DatasetFolder based on torchvisions implementation.
    (https://pytorch.org/docs/stable/torchvision/datasets.html#datasetfolder)

    Attributes:
        root:
            Root directory path
        loader:
            Function that loads file at path
        extensions:
            Tuple of allowed extensions
        transform:
            Function that takes a PIL image and returns transformed version
        target_transform:
            As transform but for targets
        is_valid_file:
            Used to check corrupt files

    Raises:
        RuntimeError: If no supported files are found in root.
    """

    def __init__(
        self,
        root: str,
        loader=default_loader,
        extensions=None,
        transform=None,
        target_transform=None,
        is_valid_file=None,
    ):
        super(DatasetFolder, self).__init__(
            root, transform=transform, target_transform=target_transform
        )

        samples = _make_dataset(self.root, extensions, is_valid_file)
        if not samples:
            message = "Found 0 files in folder: {}\n".format(self.root)
            if extensions is not None:
                message += "Supported extensions are: {}".format(",".join(extensions))
            raise RuntimeError(message)

        self.loader = loader
        self.extensions = extensions
        self.samples = samples
        # All samples share the dummy target 0 (flat directory convention).
        self.targets = [target for _, target in samples]

    def __getitem__(self, index: int):
        """Returns item at index.

        Args:
            index:
                Index of the sample to retrieve.

        Returns:
            A tuple (sample, target) where target is 0.
        """
        path, target = self.samples[index]
        image = self.loader(path)
        if self.transform is not None:
            image = self.transform(image)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return image, target

    def __len__(self):
        """Returns the number of samples in the dataset."""
        return len(self.samples)
| 3,811 | 26.623188 | 84 | py |
lightly | lightly-master/lightly/data/_image_loaders.py | """torchvision image loaders
(see https://pytorch.org/docs/stable/_modules/torchvision/datasets/folder.html)
"""
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from PIL import Image
def pil_loader(path):
    """Loads the image at path with PIL and converts it to RGB."""
    # open path as file to avoid ResourceWarning
    # (https://github.com/python-pillow/Pillow/issues/835)
    with open(path, "rb") as file_handle:
        image = Image.open(file_handle)
        return image.convert("RGB")
def accimage_loader(path):
    """Loads the image at path with accimage, falling back to PIL on errors.

    Note: an ImportError (accimage not installed) deliberately propagates;
    only decoding failures fall back to the PIL loader.
    """
    try:
        import accimage

        image = accimage.Image(path)
    except IOError:
        # Potentially a decoding problem, fall back to PIL.Image
        image = pil_loader(path)
    return image
def default_loader(path):
    """Loads the image at path with the configured torchvision backend."""
    from torchvision import get_image_backend

    backend = get_image_backend()
    if backend == "accimage":
        return accimage_loader(path)
    return pil_loader(path)
| 851 | 22.027027 | 79 | py |
lightly | lightly-master/lightly/data/_utils.py | """ Check for Corrupt Images """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import os
from typing import *
import tqdm.contrib.concurrent as concurrent
from PIL import Image, UnidentifiedImageError
from lightly.data import LightlyDataset
def check_images(data_dir: str) -> Tuple[List[str], List[str]]:
    """Iterate through a directory of images and find corrupt images

    Args:
        data_dir: Path to the directory containing the images

    Returns:
        (healthy_images, corrupt_images)
    """
    dataset = LightlyDataset(input_dir=data_dir)
    filenames = dataset.get_filenames()

    def _is_corrupt(filename):
        # Fully decode the image; corrupt files raise during load().
        try:
            image = Image.open(os.path.join(data_dir, filename))
            image.load()
        except (IOError, UnidentifiedImageError):
            return True
        return False

    mapped = concurrent.thread_map(
        _is_corrupt, filenames, chunksize=min(32, len(filenames))
    )
    healthy_images = []
    corrupt_images = []
    for filename, is_corrupt in zip(filenames, mapped):
        (corrupt_images if is_corrupt else healthy_images).append(filename)
    return healthy_images, corrupt_images
| 1,203 | 27.666667 | 86 | py |
lightly | lightly-master/lightly/data/_video.py | """ Video Dataset """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import os
import threading
import warnings
import weakref
from fractions import Fraction
from typing import Any, Dict, List, Tuple
import numpy as np
import torch
import torchvision
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, io
from tqdm import tqdm
try:
import av
AV_AVAILABLE = True
except ImportError:
AV_AVAILABLE = False
if io._HAS_VIDEO_OPT:
torchvision.set_video_backend("video_reader")
class VideoError(Exception):
    """Base exception class for errors during video loading.

    All video-specific exceptions in this module derive from this class so
    callers can catch any video loading failure with one except clause.
    """

    pass
class EmptyVideoError(VideoError):
    """Exception raised when trying to load a frame from an empty video
    (a video whose timestamp list is empty)."""

    pass
class FrameShapeError(VideoError):
    """Exception raised when the loaded frame has an unexpected shape
    (fewer than the expected 3 dimensions (H, W, C))."""

    pass
class NonIncreasingTimestampError(VideoError):
    """Exception raised when trying to load a frame that has a timestamp
    equal or lower than the timestamps of previous frames in the video.

    Loading such a frame is unsafe because seeking may return the wrong frame.
    """

    pass
class UnseekableTimestampError(VideoError):
    """Exception raised when trying to load a frame that has a timestamp which
    cannot be seeked to by the video loader.
    """

    pass
# @guarin 18.02.2022
# VideoLoader and VideoDataset multi-thread and multi-processing infos
# --------------------------------------------------------------------
# The VideoDataset class should be safe to use in multi-thread and
# multi-processing settings. For the multi-processing setting it is assumed that
# a pytorch DataLoader is used. Multi-threading should not be use with the
# torchvision pyav video packend as pyav seems to be limited to a single thread.
# You will not see any speedups when using it from multiple threads!
#
# The VideoLoader class is thread safe because it inherits from threading.local.
# When using it within a pytorch DataLoader a new instance should be created
# in each process when using the torchvision video_reader backend, otherwise
# decoder errors can happen when iterating multiple times over the dataloader.
# This is specific to the video_reader backend and does not happen with the pyav
# backend.
#
# In the VideoDataset class we avoid sharing VideoLoader instances between
# workers by tracking the worker accessing the dataset. VideoLoaders are reset
# if a new worker accesses the dataset. Note that changes to the dataset class
# by a worker are unique to that worker and not seen by other workers or the
# main process.
class VideoLoader(threading.local):
    """Implementation of VideoLoader.

    The VideoLoader is a wrapper around the torchvision video interface. With
    the VideoLoader you can read specific frames or the next frames of a video.
    It automatically switches to the `video_loader` backend if available. Reading
    sequential frames is significantly faster since it uses the VideoReader
    class from torchvision.

    The video loader automatically detects if you read out subsequent frames and
    will use the fast read method if possible.

    Attributes:
        path:
            Path to the video file.
        timestamps:
            Timestamps (in pts_unit) of all frames in the video.
        backend:
            Torchvision video backend, either "video_reader" or "pyav".
        eps:
            Small value to account for floating point imprecisions.

    Examples:
        >>> from torchvision import io
        >>>
        >>> # get timestamps
        >>> ts, fps = io.read_video_timestamps('myvideo.mp4', pts_unit = 'sec')
        >>>
        >>> # create a VideoLoader
        >>> video_loader = VideoLoader('myvideo.mp4', ts)
        >>>
        >>> # get frame at specific timestamp
        >>> frame = video_loader.read_frame(ts[21])
        >>>
        >>> # get next frame
        >>> frame = video_loader.read_frame()
    """

    def __init__(
        self,
        path: str,
        timestamps: List[float],
        backend: str = "video_reader",
        eps: float = 1e-6,
    ):
        self.path = path
        self.timestamps = timestamps
        # Index of the last frame that was read; None before the first read.
        self.current_index = None
        self.pts_unit = "sec"
        self.backend = backend
        self.eps = eps
        has_video_reader = io._HAS_VIDEO_OPT and hasattr(io, "VideoReader")
        if has_video_reader and self.backend == "video_reader":
            self.reader = io.VideoReader(path=self.path)
        else:
            # Fall back to pyav-style io.read_video calls.
            self.reader = None

    def read_frame(self, timestamp=None):
        """Reads the next frame or from timestamp.

        If no timestamp is provided this method just returns the next frame from
        the video. This is significantly (up to 10x) faster if the `video_loader`
        backend is available. If a timestamp is provided we first have to seek
        to the right position and then load the frame.

        Args:
            timestamp:
                Specific timestamp of frame in seconds or None (default: None)

        Returns:
            A PIL Image

        Raises:
            StopIteration:
                If end of video is reached and timestamp is None.
            ValueError:
                If provided timestamp is not in self.timestamps.
            VideoError:
                If the frame could not be loaded.
        """
        if not self.timestamps:
            raise EmptyVideoError(f"Cannot load frame from empty video {self.path}.")

        if timestamp is None:
            # Try to read next frame.
            if self.current_index is None:
                # Beginning of video.
                index = 0
                timestamp = self.timestamps[index]
            elif self.current_index + 1 >= len(self.timestamps):
                # Reached end of video.
                # BUG FIX: was `self.current_index >= len(self.timestamps)`,
                # which can never be true (current_index <= len - 1), so reading
                # past the last frame raised IndexError instead of StopIteration.
                raise StopIteration()
            else:
                # Read next frame.
                index = self.current_index + 1
                timestamp = self.timestamps[index]
        elif (
            self.current_index is not None
            and self.current_index + 1 < len(self.timestamps)
            and timestamp == self.timestamps[self.current_index + 1]
        ):
            # Provided timestamp is timestamp of next frame.
            index = self.current_index + 1
        else:
            # Random timestamp, must find corresponding index.
            index = self.timestamps.index(timestamp)

        if self.reader:
            # Only seek if we cannot just call next(self.reader).
            if (
                self.current_index is None
                and index != 0
                or self.current_index is not None
                and index != self.current_index + 1
            ):
                self.reader.seek(timestamp)

            # Find next larger timestamp than the one we seek. Used to verify
            # that we did not seek too far in the video and that the correct
            # frame is returned.
            if index + 1 < len(self.timestamps):
                try:
                    next_timestamp = next(
                        ts for ts in self.timestamps[index + 1 :] if ts > timestamp
                    )
                except StopIteration:
                    # All timestamps of future frames are smaller.
                    next_timestamp = float("inf")
            else:
                # Want to load last frame in video.
                next_timestamp = float("inf")

            # Load the frame.
            try:
                while True:
                    frame_info = next(self.reader)
                    if frame_info["pts"] < timestamp - self.eps:
                        # Did not read far enough, let's continue reading more
                        # frames. This can happen due to decreasing timestamps.
                        # BUG FIX: this branch used to call next(self.reader)
                        # before looping, consuming a frame whose pts was never
                        # checked (the target frame could be skipped silently).
                        continue
                    elif frame_info["pts"] >= next_timestamp:
                        # Accidentally read too far, let's seek back to the
                        # correct position and exit. This can happen due to
                        # imprecise seek.
                        self.reader.seek(timestamp)
                        frame_info = next(self.reader)
                        break
                    else:
                        break
            except StopIteration:
                # Accidentally reached the end of the video, let's seek back to
                # the correction position. This can happen due to imprecise seek.
                self.reader.seek(timestamp)
                try:
                    frame_info = next(self.reader)
                except StopIteration as ex:
                    # Seeking to this timestamp simply doesn't work.
                    raise UnseekableTimestampError(
                        f"Cannot seek to frame with timestamp {float(timestamp)} "
                        f"in {self.path}."
                    ) from ex

            if (
                frame_info["pts"] < timestamp - self.eps
                or frame_info["pts"] >= next_timestamp
            ):
                # We accidentally loaded the wrong frame. This should only
                # happen if self.reader.seek(timestamp) does not seek to the
                # correct timestamp. In this case there is nothing we can do to
                # load the correct frame and we alert the user that something
                # went wrong.
                warnings.warn(
                    f"Loaded wrong frame in {self.path}! Tried to load frame "
                    f"with index {index} and timestamp {float(timestamp)} but "
                    f'could only find frame with timestamp {frame_info["pts"]}.'
                )

            # Make sure we have the tensor in correct shape (we want H x W x C)
            frame = frame_info["data"].permute(1, 2, 0)
            self.current_index = index
        else:  # fallback on pyav
            frame, _, _ = io.read_video(
                self.path,
                start_pts=timestamp,
                end_pts=timestamp,
                pts_unit=self.pts_unit,
            )
            self.current_index = index

        if len(frame.shape) < 3:
            raise FrameShapeError(
                f"Loaded frame has unexpected shape {frame.shape}. "
                f"Frames are expected to have 3 dimensions: (H, W, C)."
            )

        # sometimes torchvision returns multiple frames for one timestamp (bug?)
        if len(frame.shape) > 3 and frame.shape[0] > 1:
            frame = frame[0]

        # make sure we return a H x W x C tensor and not (1 x H x W x C)
        if len(frame.shape) == 4:
            frame = frame.squeeze()

        # convert to PIL image
        image = Image.fromarray(frame.numpy())
        return image
class _TimestampFpsFromVideosDataset(Dataset):
def __init__(self, video_instances: List[str], pts_unit: str):
self.video_instances = video_instances
self.pts_unit = pts_unit
def __len__(self):
return len(self.video_instances)
def __getitem__(self, index):
instance = self.video_instances[index]
ts, fps = io.read_video_timestamps(instance, pts_unit=self.pts_unit)
return ts, fps
def _make_dataset(
directory,
extensions=None,
is_valid_file=None,
pts_unit="sec",
tqdm_args=None,
num_workers: int = 0,
):
"""Returns a list of all video files, timestamps, and offsets.
Args:
directory:
Root directory path (should not contain subdirectories).
extensions:
Tuple of valid extensions.
is_valid_file:
Used to find valid files.
pts_unit:
Unit of the timestamps.
tqdm_args:
arguments to pass to tqdm
num_workers:
number of workers to use for multithreading
Returns:
A list of video files, timestamps, frame offsets, and fps.
"""
if tqdm_args is None:
tqdm_args = {}
if extensions is None:
if is_valid_file is None:
ValueError("Both extensions and is_valid_file cannot be None")
else:
_is_valid_file = is_valid_file
else:
def is_valid_file_extension(filepath):
return filepath.lower().endswith(extensions)
if is_valid_file is None:
_is_valid_file = is_valid_file_extension
else:
def _is_valid_file(filepath):
return is_valid_file_extension(filepath) and is_valid_file(filepath)
# find all video instances (no subdirectories)
video_instances = []
def on_error(error):
raise error
for root, _, files in os.walk(directory, onerror=on_error):
for fname in files:
# skip invalid files
if not _is_valid_file(os.path.join(root, fname)):
continue
# keep track of valid files
path = os.path.join(root, fname)
video_instances.append(path)
# define loader to get the timestamps
num_workers = min(num_workers, len(video_instances))
if len(video_instances) == 1:
num_workers = 0
loader = DataLoader(
_TimestampFpsFromVideosDataset(video_instances, pts_unit=pts_unit),
num_workers=num_workers,
batch_size=None,
shuffle=False,
)
# actually load the data
tqdm_args = dict(tqdm_args)
tqdm_args.setdefault("unit", " video")
tqdm_args.setdefault("desc", "Counting frames in videos")
timestamps_fpss = list(tqdm(loader, **tqdm_args))
timestamps, fpss = zip(*timestamps_fpss)
# get frame offsets
frame_counts = [len(ts) for ts in timestamps]
offsets = [0] + list(np.cumsum(frame_counts[:-1]))
return video_instances, timestamps, offsets, fpss
def _find_non_increasing_timestamps(timestamps: List[Fraction]) -> List[bool]:
"""Finds all non-increasing timestamps.
Arguments:
timestamps:
Video frame timestamps.
Returns:
A boolean for each input timestamp which is True if the timestamp is
non-increasing and False otherwise.
"""
if len(timestamps) == 0:
return []
is_non_increasing = np.zeros(
shape=len(timestamps),
dtype=bool,
)
max_timestamp = timestamps[0] - 1
for i, timestamp in enumerate(timestamps):
if timestamp > max_timestamp:
max_timestamp = timestamp
else:
is_non_increasing[i] = True
return list(is_non_increasing)
class VideoDataset(datasets.VisionDataset):
    """Implementation of a video dataset.

    The VideoDataset allows random reads from a video file without extracting
    all frames beforehand. This is more storage efficient but is slower.

    Attributes:
        root:
            Root directory path.
        extensions:
            Tuple of allowed extensions.
        transform:
            Function that takes a PIL image and returns transformed version
        target_transform:
            As transform but for targets
        is_valid_file:
            Used to check corrupt files
        exception_on_non_increasing_timestamp:
            If True, a NonIncreasingTimestampError is raised when trying to load
            a frame that has a timestamp lower or equal to the timestamps of
            previous frames in the same video.
    """

    def __init__(
        self,
        root,
        extensions=None,
        transform=None,
        target_transform=None,
        is_valid_file=None,
        exception_on_non_increasing_timestamp=True,
        tqdm_args: Optional[Dict[str, Any]] = None,
        num_workers: int = 0,
    ):
        """Scans root for videos and indexes all their frames.

        Raises:
            RuntimeError: If no videos are found in root.
        """
        super(VideoDataset, self).__init__(
            root, transform=transform, target_transform=target_transform
        )
        videos, video_timestamps, offsets, fps = _make_dataset(
            self.root,
            extensions,
            is_valid_file,
            tqdm_args=tqdm_args,
            num_workers=num_workers,
        )
        if len(videos) == 0:
            msg = "Found 0 videos in folder: {}\n".format(self.root)
            if extensions is not None:
                msg += "Supported extensions are: {}".format(",".join(extensions))
            raise RuntimeError(msg)

        self.extensions = extensions
        self.backend = torchvision.get_video_backend()
        self.exception_on_non_increasing_timestamp = (
            exception_on_non_increasing_timestamp
        )

        self.videos = videos
        self.video_timestamps = video_timestamps
        # Total number of frames across all videos (dataset length).
        self._length = sum((len(ts) for ts in self.video_timestamps))
        # Boolean value for every timestamp in self.video_timestamps. If True
        # the timestamp of the frame is non-increasing compared to timestamps of
        # previous frames in the video.
        self.video_timestamps_is_non_increasing = [
            _find_non_increasing_timestamps(timestamps)
            for timestamps in video_timestamps
        ]

        # offsets[i] indicates the index of the first frame of the i-th video.
        # e.g. for two videos of length 10 and 20, the offsets will be [0, 10].
        self.offsets = offsets
        self.fps = fps

        # Current VideoLoader instance and the corresponding video index. We
        # only keep track of the last accessed video as this is a good trade-off
        # between speed and memory requirements.
        # See https://github.com/lightly-ai/lightly/pull/702 for details.
        self._video_loader = None
        self._video_index = None

        # Keep unique reference of dataloader worker. We need this to avoid
        # accidentaly sharing VideoLoader instances between workers.
        self._worker_ref = None

        # Lock to prevent multiple threads creating a new VideoLoader at the
        # same time.
        self._video_loader_lock = threading.Lock()

    def __getitem__(self, index: int):
        """Returns item at index.

        Finds the video of the frame at index with the help of the frame
        offsets. Then, loads the frame from the video, applies the transforms,
        and returns the frame along with the index of the video (as target).

        For example, if there are two videos with 10 and 20 frames respectively
        in the input directory:

        Requesting the 5th sample returns the 5th frame from the first video and
        the target indicates the index of the source video which is 0.
        >>> dataset[5]
        >>> > <PIL Image>, 0

        Requesting the 20th sample returns the 10th frame from the second video
        and the target indicates the index of the source video which is 1.
        >>> dataset[20]
        >>> > <PIL Image>, 1

        Args:
            index:
                Index of the sample to retrieve.

        Returns:
            A tuple (sample, target) where target indicates the video index.

        Raises:
            IndexError:
                If index is out of bounds.
            VideoError:
                If the frame at the given index could not be loaded.
        """
        if index < 0 or index >= self.__len__():
            raise IndexError(
                f"Index {index} is out of bounds for VideoDataset"
                f" of size {self.__len__()}."
            )

        # each sample belongs to a video, to load the sample at index, we need
        # to find the video to which the sample belongs and then read the frame
        # from this video on the disk.
        # Linear scan backwards over offsets to find the owning video.
        i = len(self.offsets) - 1
        while self.offsets[i] > index:
            i = i - 1

        timestamp_idx = index - self.offsets[i]

        if (
            self.exception_on_non_increasing_timestamp
            and self.video_timestamps_is_non_increasing[i][timestamp_idx]
        ):
            raise NonIncreasingTimestampError(
                f"Frame {timestamp_idx} of video {self.videos[i]} has "
                f"a timestamp that is equal or lower than timestamps of previous "
                f"frames in the video. Trying to load this frame might result "
                f"in the wrong frame being returned. Set the VideoDataset.exception_on_non_increasing_timestamp"
                f"attribute to False to allow unsafe frame loading."
            )

        # find and return the frame as PIL image
        frame_timestamp = self.video_timestamps[i][timestamp_idx]
        video_loader = self._get_video_loader(i)
        sample = video_loader.read_frame(frame_timestamp)

        target = i
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)

        return sample, target

    def __len__(self) -> int:
        """Returns the number of samples (frames) in the dataset.

        This can be precomputed, because self.video_timestamps is only
        set in the __init__
        """
        return self._length

    def get_filename(self, index: int) -> str:
        """Returns a filename for the frame at index.

        The filename is created from the video filename, the frame number, and
        the video format. The frame number will be zero padded to make sure
        all filenames have the same length and can easily be sorted.
        E.g. when retrieving a sample from the video
        `my_video.mp4` at frame 153, the filename will be:

        >>> my_video-153-mp4.png

        Args:
            index:
                Index of the frame to retrieve.

        Returns:
            The filename of the frame as described above.

        Raises:
            IndexError: If index is out of bounds.
        """
        if index < 0 or index >= self.__len__():
            raise IndexError(
                f"Index {index} is out of bounds for VideoDataset"
                f" of size {self.__len__()}."
            )

        # each sample belongs to a video, to load the sample at index, we need
        # to find the video to which the sample belongs and then read the frame
        # from this video on the disk.
        i = len(self.offsets) - 1
        while self.offsets[i] > index:
            i = i - 1

        # get filename of the video file
        video = self.videos[i]
        video_name, video_format = self._video_name_format(video)

        # get frame number
        frame_number = index - self.offsets[i]

        n_frames = self._video_frame_count(i)
        zero_padding = len(str(n_frames))

        return self._format_filename(
            video_name=video_name,
            video_format=video_format,
            frame_number=frame_number,
            zero_padding=zero_padding,
        )

    def get_filenames(self) -> List[str]:
        """Returns a list filenames for all frames in the dataset."""
        filenames = []
        for i, video in enumerate(self.videos):
            video_name, video_format = self._video_name_format(video)
            n_frames = self._video_frame_count(i)
            # Pad frame numbers to the width of the largest frame index so the
            # generated filenames sort lexicographically.
            zero_padding = len(str(n_frames))
            for frame_number in range(n_frames):
                filenames.append(
                    self._format_filename(
                        video_name=video_name,
                        frame_number=frame_number,
                        video_format=video_format,
                        zero_padding=zero_padding,
                    )
                )
        return filenames

    def _video_frame_count(self, video_index: int) -> int:
        """Returns the number of frames in the video with the given index."""
        if video_index < len(self.offsets) - 1:
            n_frames = self.offsets[video_index + 1] - self.offsets[video_index]
        else:
            # Last video: count to the end of the dataset.
            n_frames = len(self) - self.offsets[video_index]
        return n_frames

    def _video_name_format(self, video_filename: str) -> Tuple[str, str]:
        """Extracts name and format from the filename of the video.

        Returns:
            A (video_name, video_format) tuple where video_name is the filename
            relative to self.root and video_format is the file extension, for
            example 'mp4'.
        """
        video_filename = os.path.relpath(video_filename, self.root)
        splits = video_filename.split(".")
        video_format = splits[-1]
        video_name = ".".join(splits[:-1])
        return video_name, video_format

    def _format_filename(
        self,
        video_name: str,
        frame_number: int,
        video_format: str,
        zero_padding: int = 8,
        extension: str = "png",
    ) -> str:
        """Builds the per-frame filename: <name>-<frame>-<format>.<extension>."""
        return f"{video_name}-{frame_number:0{zero_padding}}-{video_format}.{extension}"

    def _get_video_loader(self, video_index: int) -> VideoLoader:
        """Returns a video loader unique to the current dataloader worker."""
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is not None:
            # Use a weakref instead of worker_info.id as the worker id is reused
            # by different workers across epochs.
            worker_ref = weakref.ref(worker_info)
            if worker_ref != self._worker_ref:
                # This worker has never accessed the dataset before, we have to
                # reset the video loader.
                self._video_loader = None
                self._video_index = None
                self._worker_ref = worker_ref

        with self._video_loader_lock:
            if video_index != self._video_index:
                # Cache miss: create a fresh loader for the requested video.
                video = self.videos[video_index]
                timestamps = self.video_timestamps[video_index]
                self._video_loader = VideoLoader(video, timestamps, backend=self.backend)
                self._video_index = video_index

            return self._video_loader
| 25,874 | 34.204082 | 112 | py |
lightly | lightly-master/lightly/data/collate.py | """ Collate Functions """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import math
from multiprocessing import Value
from typing import List, Optional, Tuple, Union
from warnings import warn
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as T
from PIL import Image
from lightly.transforms import GaussianBlur, Jigsaw, RandomSolarization
from lightly.transforms.random_crop_and_flip_with_grid import RandomResizedCropAndFlip
from lightly.transforms.rotation import random_rotation_transform
from lightly.transforms.utils import IMAGENET_NORMALIZE
imagenet_normalize = IMAGENET_NORMALIZE
# Kept for backwards compatibility
class BaseCollateFunction(nn.Module):
    """Base class for other collate implementations.

    Takes a batch of images as input and transforms each image into two
    different augmentations with the help of random transforms. The images are
    then concatenated such that the output batch is exactly twice the length
    of the input batch.

    Attributes:
        transform:
            A set of torchvision transforms which are randomly applied to
            each image.
    """

    def __init__(self, transform: torchvision.transforms.Compose):
        _deprecation_warning_collate_functions()
        super(BaseCollateFunction, self).__init__()
        self.transform = transform

    def forward(self, batch: List[Tuple[Image.Image, int, str]]):
        """Turns a batch of tuples into a tuple of batches.

        Args:
            batch:
                A batch of tuples of images, labels, and filenames which
                is automatically provided if the dataloader is built from
                a LightlyDataset.

        Returns:
            A tuple of images, labels, and filenames. The images consist of
            two batches corresponding to the two transformations of the
            input images.

        Examples:
            >>> # define a random transformation and the collate function
            >>> transform = ... # some random augmentations
            >>> collate_fn = BaseCollateFunction(transform)
            >>>
            >>> # input is a batch of tuples (here, batch_size = 1)
            >>> input = [(img, 0, 'my-image.png')]
            >>> output = collate_fn(input)
            >>>
            >>> # output consists of two random transforms of the images,
            >>> # the labels, and the filenames in the batch
            >>> (img_t0, img_t1), label, filename = output
        """
        batch_size = len(batch)

        # Apply the random transform twice to every image: the first batch_size
        # entries form view 0, the second batch_size entries form view 1.
        augmented = [
            self.transform(batch[i % batch_size][0]).unsqueeze_(0)
            for i in range(2 * batch_size)
        ]

        # list of labels
        labels = torch.LongTensor([label for _, label, _ in batch])
        # list of filenames
        fnames = [fname for _, _, fname in batch]

        # tuple of the two augmented views
        views = (
            torch.cat(augmented[:batch_size], 0),
            torch.cat(augmented[batch_size:], 0),
        )

        return views, labels, fnames
class ImageCollateFunction(BaseCollateFunction):
    """Implementation of a collate function for images.

    This is an implementation of the BaseCollateFunction with a concrete
    set of transforms.

    The set of transforms is inspired by the SimCLR paper as it has shown
    to produce powerful embeddings.

    Attributes:
        input_size:
            Size of the input image in pixels.
        cj_prob:
            Probability that color jitter is applied.
        cj_bright:
            How much to jitter brightness.
        cj_contrast:
            How much to jitter constrast.
        cj_sat:
            How much to jitter saturation.
        cj_hue:
            How much to jitter hue.
        min_scale:
            Minimum size of the randomized crop relative to the input_size.
        random_gray_scale:
            Probability of conversion to grayscale.
        gaussian_blur:
            Probability of Gaussian blur.
        kernel_size:
            Will be deprecated in favor of `sigmas` argument. If set, the old behavior applies and `sigmas` is ignored.
            Used to calculate sigma of gaussian blur with kernel_size * input_size.
        sigmas:
            Tuple of min and max value from which the std of the gaussian kernel is sampled.
            Is ignored if `kernel_size` is set.
        vf_prob:
            Probability that vertical flip is applied.
        hf_prob:
            Probability that horizontal flip is applied.
        rr_prob:
            Probability that random rotation is applied.
        rr_degrees:
            Range of degrees to select from for random rotation. If rr_degrees is None,
            images are rotated by 90 degrees. If rr_degrees is a (min, max) tuple,
            images are rotated by a random angle in [min, max]. If rr_degrees is a
            single number, images are rotated by a random angle in
            [-rr_degrees, +rr_degrees]. All rotations are counter-clockwise.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
    """

    def __init__(
        self,
        input_size: int = 64,
        cj_prob: float = 0.8,
        cj_bright: float = 0.7,
        cj_contrast: float = 0.7,
        cj_sat: float = 0.7,
        cj_hue: float = 0.2,
        min_scale: float = 0.15,
        random_gray_scale: float = 0.2,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.2, 2),
        vf_prob: float = 0.0,
        hf_prob: float = 0.5,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        normalize: dict = imagenet_normalize,
    ):
        # Removed dead code that reduced a tuple input_size to a scalar but
        # never used the result; T.RandomResizedCrop accepts both an int and
        # a (height, width) tuple directly.
        color_jitter = T.ColorJitter(cj_bright, cj_contrast, cj_sat, cj_hue)
        transform = [
            T.RandomResizedCrop(size=input_size, scale=(min_scale, 1.0)),
            random_rotation_transform(rr_prob=rr_prob, rr_degrees=rr_degrees),
            T.RandomHorizontalFlip(p=hf_prob),
            T.RandomVerticalFlip(p=vf_prob),
            T.RandomApply([color_jitter], p=cj_prob),
            T.RandomGrayscale(p=random_gray_scale),
            GaussianBlur(kernel_size=kernel_size, sigmas=sigmas, prob=gaussian_blur),
            T.ToTensor(),
        ]
        # Normalization is optional so callers can work with raw tensors.
        if normalize:
            transform += [T.Normalize(mean=normalize["mean"], std=normalize["std"])]
        transform = T.Compose(transform)
        super(ImageCollateFunction, self).__init__(transform)
class MultiViewCollateFunction(nn.Module):
    """Generates multiple views for each image in the batch.

    Attributes:
        transforms:
            List of transformation functions. Each function is used to
            generate one view of the batch.
    """

    def __init__(self, transforms: List[torchvision.transforms.Compose]):
        _deprecation_warning_collate_functions()
        super().__init__()
        self.transforms = transforms

    def forward(self, batch: List[tuple]):
        """Turns a batch of tuples into a tuple of batches.

        Args:
            batch:
                The input batch.

        Returns:
            A (views, labels, fnames) tuple where views is a list of tensors
            with each tensor containing one view of the batch.
        """
        images = [image for image, _, _ in batch]
        # One stacked tensor per transform; each transform is applied to
        # every image of the batch in order.
        views = [
            torch.stack([transform(image) for image in images])
            for transform in self.transforms
        ]
        labels = torch.LongTensor([label for _, label, _ in batch])
        fnames = [fname for _, _, fname in batch]
        return views, labels, fnames
class SimCLRCollateFunction(ImageCollateFunction):
    """Implements the transformations for SimCLR.

    Attributes:
        input_size:
            Size of the input image in pixels.
        cj_prob:
            Probability that color jitter is applied.
        cj_strength:
            Strength of the color jitter.
        min_scale:
            Minimum size of the randomized crop relative to the input_size.
        random_gray_scale:
            Probability of conversion to grayscale.
        gaussian_blur:
            Probability of Gaussian blur.
        kernel_size:
            Will be deprecated in favor of `sigmas` argument. If set, the old behavior applies and `sigmas` is ignored.
            Used to calculate sigma of gaussian blur with kernel_size * input_size.
        sigmas:
            Tuple of min and max value from which the std of the gaussian kernel is sampled.
            Is ignored if `kernel_size` is set.
        vf_prob:
            Probability that vertical flip is applied.
        hf_prob:
            Probability that horizontal flip is applied.
        rr_prob:
            Probability that random rotation is applied.
        rr_degrees:
            Range of degrees to select from for random rotation. If rr_degrees is None,
            images are rotated by 90 degrees. If rr_degrees is a (min, max) tuple,
            images are rotated by a random angle in [min, max]. If rr_degrees is a
            single number, images are rotated by a random angle in
            [-rr_degrees, +rr_degrees]. All rotations are counter-clockwise.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.

    Examples:
        >>> # SimCLR for ImageNet
        >>> collate_fn = SimCLRCollateFunction()
        >>>
        >>> # SimCLR for CIFAR-10
        >>> collate_fn = SimCLRCollateFunction(
        >>>     input_size=32,
        >>>     gaussian_blur=0.,
        >>> )
    """

    def __init__(
        self,
        input_size: int = 224,
        cj_prob: float = 0.8,
        cj_strength: float = 0.5,
        min_scale: float = 0.08,
        random_gray_scale: float = 0.2,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.2, 2),
        vf_prob: float = 0.0,
        hf_prob: float = 0.5,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        normalize: dict = imagenet_normalize,
    ):
        # SimCLR scales brightness/contrast/saturation jitter by 0.8 and hue
        # jitter by 0.2 of the overall color-jitter strength.
        bcs_jitter = cj_strength * 0.8
        hue_jitter = cj_strength * 0.2
        super().__init__(
            input_size=input_size,
            cj_prob=cj_prob,
            cj_bright=bcs_jitter,
            cj_contrast=bcs_jitter,
            cj_sat=bcs_jitter,
            cj_hue=hue_jitter,
            min_scale=min_scale,
            random_gray_scale=random_gray_scale,
            gaussian_blur=gaussian_blur,
            kernel_size=kernel_size,
            sigmas=sigmas,
            vf_prob=vf_prob,
            hf_prob=hf_prob,
            rr_prob=rr_prob,
            rr_degrees=rr_degrees,
            normalize=normalize,
        )
class MoCoCollateFunction(ImageCollateFunction):
    """Implements the transformations for MoCo v1.

    For MoCo v2, simply use the SimCLR settings.

    Attributes:
        input_size:
            Size of the input image in pixels.
        cj_prob:
            Probability that color jitter is applied.
        cj_strength:
            Strength of the color jitter.
        min_scale:
            Minimum size of the randomized crop relative to the input_size.
        random_gray_scale:
            Probability of conversion to grayscale.
        gaussian_blur:
            Probability of Gaussian blur.
        kernel_size:
            Will be deprecated in favor of `sigmas` argument. If set, the old behavior applies and `sigmas` is ignored.
            Used to calculate sigma of gaussian blur with kernel_size * input_size.
        sigmas:
            Tuple of min and max value from which the std of the gaussian kernel is sampled.
            Is ignored if `kernel_size` is set.
        vf_prob:
            Probability that vertical flip is applied.
        hf_prob:
            Probability that horizontal flip is applied.
        rr_prob:
            Probability that random rotation is applied.
        rr_degrees:
            Range of degrees to select from for random rotation. If rr_degrees is None,
            images are rotated by 90 degrees. If rr_degrees is a (min, max) tuple,
            images are rotated by a random angle in [min, max]. If rr_degrees is a
            single number, images are rotated by a random angle in
            [-rr_degrees, +rr_degrees]. All rotations are counter-clockwise.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.

    Examples:
        >>> # MoCo v1 for ImageNet
        >>> collate_fn = MoCoCollateFunction()
        >>>
        >>> # MoCo v1 for CIFAR-10
        >>> collate_fn = MoCoCollateFunction(
        >>>     input_size=32,
        >>> )
    """

    def __init__(
        self,
        input_size: int = 224,
        cj_prob: float = 0.8,
        cj_strength: float = 0.4,
        min_scale: float = 0.2,
        random_gray_scale: float = 0.2,
        gaussian_blur: float = 0.0,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.2, 2),
        vf_prob: float = 0.0,
        hf_prob: float = 0.5,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        normalize: dict = imagenet_normalize,
    ):
        # MoCo v1 applies the same jitter strength to brightness, contrast,
        # saturation and hue alike.
        super().__init__(
            input_size=input_size,
            cj_prob=cj_prob,
            cj_bright=cj_strength,
            cj_contrast=cj_strength,
            cj_sat=cj_strength,
            cj_hue=cj_strength,
            min_scale=min_scale,
            random_gray_scale=random_gray_scale,
            gaussian_blur=gaussian_blur,
            kernel_size=kernel_size,
            sigmas=sigmas,
            vf_prob=vf_prob,
            hf_prob=hf_prob,
            rr_prob=rr_prob,
            rr_degrees=rr_degrees,
            normalize=normalize,
        )
class MultiCropCollateFunction(MultiViewCollateFunction):
    """Implements the multi-crop transformations for SwaV.

    Attributes:
        crop_sizes:
            Size of the input image in pixels for each crop category.
        crop_counts:
            Number of crops for each crop category.
        crop_min_scales:
            Min scales for each crop category.
        crop_max_scales:
            Max_scales for each crop category.
        transforms:
            Transforms which are applied to all crops.

    Raises:
        ValueError:
            If crop_counts, crop_min_scales, or crop_max_scales do not have
            the same length as crop_sizes.
    """

    def __init__(
        self,
        crop_sizes: List[int],
        crop_counts: List[int],
        crop_min_scales: List[float],
        crop_max_scales: List[float],
        transforms: T.Compose,
    ):
        if len(crop_sizes) != len(crop_counts):
            raise ValueError(
                "Length of crop_sizes and crop_counts must be equal but are"
                f" {len(crop_sizes)} and {len(crop_counts)}."
            )
        if len(crop_sizes) != len(crop_min_scales):
            raise ValueError(
                "Length of crop_sizes and crop_min_scales must be equal but are"
                f" {len(crop_sizes)} and {len(crop_min_scales)}."
            )
        # Bug fix: this check previously compared against crop_min_scales
        # again (copy-paste error), so a wrong-length crop_max_scales passed
        # validation and failed later with an opaque IndexError.
        if len(crop_sizes) != len(crop_max_scales):
            raise ValueError(
                "Length of crop_sizes and crop_max_scales must be equal but are"
                f" {len(crop_sizes)} and {len(crop_max_scales)}."
            )

        crop_transforms = []
        for i in range(len(crop_sizes)):
            random_resized_crop = T.RandomResizedCrop(
                crop_sizes[i], scale=(crop_min_scales[i], crop_max_scales[i])
            )
            # The same Compose instance is reused crop_counts[i] times; all
            # transforms are stateless so sharing is safe.
            crop_transforms.extend(
                [
                    T.Compose(
                        [
                            random_resized_crop,
                            transforms,
                        ]
                    )
                ]
                * crop_counts[i]
            )
        super().__init__(crop_transforms)
class SwaVCollateFunction(MultiCropCollateFunction):
    """Implements the multi-crop transformations for SwaV.

    Attributes:
        crop_sizes:
            Size of the input image in pixels for each crop category.
        crop_counts:
            Number of crops for each crop category.
        crop_min_scales:
            Min scales for each crop category.
        crop_max_scales:
            Max_scales for each crop category.
        hf_prob:
            Probability that horizontal flip is applied.
        vf_prob:
            Probability that vertical flip is applied.
        rr_prob:
            Probability that random rotation is applied.
        rr_degrees:
            Range of degrees to select from for random rotation. If rr_degrees is None,
            images are rotated by 90 degrees. If rr_degrees is a (min, max) tuple,
            images are rotated by a random angle in [min, max]. If rr_degrees is a
            single number, images are rotated by a random angle in
            [-rr_degrees, +rr_degrees]. All rotations are counter-clockwise.
        cj_prob:
            Probability that color jitter is applied.
        cj_strength:
            Strength of the color jitter.
        random_gray_scale:
            Probability of conversion to grayscale.
        gaussian_blur:
            Probability of Gaussian blur.
        kernel_size:
            Will be deprecated in favor of `sigmas` argument. If set, the old behavior applies and `sigmas` is ignored.
            Used to calculate sigma of gaussian blur with kernel_size * input_size.
        sigmas:
            Tuple of min and max value from which the std of the gaussian kernel is sampled.
            Is ignored if `kernel_size` is set.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.

    Examples:
        >>> # SwaV for Imagenet
        >>> collate_fn = SwaVCollateFunction()
        >>>
        >>> # SwaV w/ 2x160 and 4x96 crops
        >>> collate_fn = SwaVCollateFunction(
        >>>     crop_sizes=[160, 96],
        >>>     crop_counts=[2, 4],
        >>> )
    """

    def __init__(
        self,
        crop_sizes: List[int] = [224, 96],
        crop_counts: List[int] = [2, 6],
        crop_min_scales: List[float] = [0.14, 0.05],
        crop_max_scales: List[float] = [1.0, 0.14],
        hf_prob: float = 0.5,
        vf_prob: float = 0.0,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        cj_prob: float = 0.8,
        cj_strength: float = 0.8,
        random_gray_scale: float = 0.2,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.2, 2),
        normalize: dict = imagenet_normalize,
    ):
        # SwaV jitters hue at a quarter of the overall strength.
        color_jitter = T.ColorJitter(
            cj_strength,
            cj_strength,
            cj_strength,
            cj_strength / 4.0,
        )
        transforms = T.Compose(
            [
                T.RandomHorizontalFlip(p=hf_prob),
                T.RandomVerticalFlip(p=vf_prob),
                random_rotation_transform(rr_prob=rr_prob, rr_degrees=rr_degrees),
                # A stray parameterless `T.ColorJitter()` was removed here:
                # with all parameters at their default of 0 it is an identity
                # transform that only consumed random numbers.
                T.RandomApply([color_jitter], p=cj_prob),
                T.RandomGrayscale(p=random_gray_scale),
                GaussianBlur(
                    kernel_size=kernel_size, sigmas=sigmas, prob=gaussian_blur
                ),
                T.ToTensor(),
                T.Normalize(mean=normalize["mean"], std=normalize["std"]),
            ]
        )
        super(SwaVCollateFunction, self).__init__(
            crop_sizes=crop_sizes,
            crop_counts=crop_counts,
            crop_min_scales=crop_min_scales,
            crop_max_scales=crop_max_scales,
            transforms=transforms,
        )
class DINOCollateFunction(MultiViewCollateFunction):
    """Implements the global and local view augmentations for DINO [0].

    This class generates two global and a user defined number of local views
    for each image in a batch. The code is adapted from [1].

    - [0]: DINO, 2021, https://arxiv.org/abs/2104.14294
    - [1]: https://github.com/facebookresearch/dino

    Attributes:
        global_crop_size:
            Crop size of the global views.
        global_crop_scale:
            Tuple of min and max scales relative to global_crop_size.
        local_crop_size:
            Crop size of the local views.
        local_crop_scale:
            Tuple of min and max scales relative to local_crop_size.
        n_local_views:
            Number of generated local views.
        hf_prob:
            Probability that horizontal flip is applied.
        vf_prob:
            Probability that vertical flip is applied.
        rr_prob:
            Probability that random rotation is applied.
        rr_degrees:
            Range of degrees to select from for random rotation. If rr_degrees is None,
            images are rotated by 90 degrees. If rr_degrees is a (min, max) tuple,
            images are rotated by a random angle in [min, max]. If rr_degrees is a
            single number, images are rotated by a random angle in
            [-rr_degrees, +rr_degrees]. All rotations are counter-clockwise.
        cj_prob:
            Probability that color jitter is applied.
        cj_bright:
            How much to jitter brightness.
        cj_contrast:
            How much to jitter constrast.
        cj_sat:
            How much to jitter saturation.
        cj_hue:
            How much to jitter hue.
        random_gray_scale:
            Probability of conversion to grayscale.
        gaussian_blur:
            Tuple of probabilities to apply gaussian blur on the different
            views. The input is ordered as follows:
            (global_view_0, global_view_1, local_views)
        kernel_size:
            Will be deprecated in favor of `sigmas` argument. If set, the old behavior applies and `sigmas` is ignored.
            Used to calculate sigma of gaussian blur with kernel_size * input_size.
        kernel_scale:
            Old argument. Value is deprecated in favor of sigmas. If set, the old behavior applies and `sigmas` is ignored.
            Used to scale the `kernel_size` of a factor of `kernel_scale`
        sigmas:
            Tuple of min and max value from which the std of the gaussian kernel is sampled.
            Is ignored if `kernel_size` is set.
        solarization_prob:
            Probability to apply solarization on the second global view.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
    """

    def __init__(
        self,
        global_crop_size=224,
        global_crop_scale=(0.4, 1.0),
        local_crop_size=96,
        local_crop_scale=(0.05, 0.4),
        n_local_views=6,
        hf_prob=0.5,
        vf_prob=0,
        rr_prob=0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        cj_prob=0.8,
        cj_bright=0.4,
        cj_contrast=0.4,
        cj_sat=0.2,
        cj_hue=0.1,
        random_gray_scale=0.2,
        gaussian_blur=(1.0, 0.1, 0.5),
        kernel_size: Optional[float] = None,
        kernel_scale: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.1, 2),
        solarization_prob=0.2,
        normalize=imagenet_normalize,
    ):
        # Geometric flips/rotation and color jittering shared by all views.
        flip_and_color_jitter = T.Compose(
            [
                T.RandomHorizontalFlip(p=hf_prob),
                T.RandomVerticalFlip(p=vf_prob),
                random_rotation_transform(rr_prob=rr_prob, rr_degrees=rr_degrees),
                T.RandomApply(
                    [
                        T.ColorJitter(
                            brightness=cj_bright,
                            contrast=cj_contrast,
                            saturation=cj_sat,
                            hue=cj_hue,
                        )
                    ],
                    p=cj_prob,
                ),
                T.RandomGrayscale(p=random_gray_scale),
            ]
        )
        # NOTE: `normalize` is rebound here from the mean/std dict argument
        # to the tensor-conversion + normalization pipeline used below.
        normalize = T.Compose(
            [
                T.ToTensor(),
                T.Normalize(mean=normalize["mean"], std=normalize["std"]),
            ]
        )
        # Both global views share the same crop transform instance.
        global_crop = T.RandomResizedCrop(
            global_crop_size,
            scale=global_crop_scale,
            interpolation=Image.BICUBIC,
        )

        # first global crop; blurred with probability gaussian_blur[0]
        global_transform_0 = T.Compose(
            [
                global_crop,
                flip_and_color_jitter,
                GaussianBlur(
                    kernel_size=kernel_size,
                    scale=kernel_scale,
                    sigmas=sigmas,
                    prob=gaussian_blur[0],
                ),
                normalize,
            ]
        )
        # second global crop; blurred with probability gaussian_blur[1] and
        # additionally solarized — the only view that gets solarization
        global_transform_1 = T.Compose(
            [
                global_crop,
                flip_and_color_jitter,
                GaussianBlur(
                    kernel_size=kernel_size,
                    scale=kernel_scale,
                    sigmas=sigmas,
                    prob=gaussian_blur[1],
                ),
                RandomSolarization(prob=solarization_prob),
                normalize,
            ]
        )
        # transformation for the local small crops; blurred with probability
        # gaussian_blur[2]
        local_transform = T.Compose(
            [
                T.RandomResizedCrop(
                    local_crop_size, scale=local_crop_scale, interpolation=Image.BICUBIC
                ),
                flip_and_color_jitter,
                GaussianBlur(
                    kernel_size=kernel_size,
                    scale=kernel_scale,
                    sigmas=sigmas,
                    prob=gaussian_blur[2],
                ),
                normalize,
            ]
        )
        local_transforms = [local_transform] * n_local_views

        # View order: [global_0, global_1, local_0, ..., local_{n-1}].
        transforms = [global_transform_0, global_transform_1]
        transforms.extend(local_transforms)
        super().__init__(transforms)
class MAECollateFunction(MultiViewCollateFunction):
    """Implements the view augmentation for MAE [0].

    - [0]: Masked Autoencoder, 2021, https://arxiv.org/abs/2111.06377

    Attributes:
        input_size:
            Size of the input image in pixels.
        min_scale:
            Minimum size of the randomized crop relative to the input_size.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
    """

    def __init__(
        self,
        input_size: Union[int, Tuple[int, int]] = 224,
        min_scale: float = 0.2,
        normalize: dict = imagenet_normalize,
    ):
        pipeline = [
            # interpolation=3 selects bicubic resampling.
            T.RandomResizedCrop(input_size, scale=(min_scale, 1.0), interpolation=3),
            T.RandomHorizontalFlip(),
            T.ToTensor(),
        ]
        if normalize:
            pipeline.append(T.Normalize(mean=normalize["mean"], std=normalize["std"]))
        # MAE only ever uses a single view per image.
        super().__init__([T.Compose(pipeline)])

    def forward(self, batch: List[tuple]):
        """Returns (view, labels, fnames); MAE needs only one view per image."""
        views, labels, fnames = super().forward(batch)
        return views[0], labels, fnames
class PIRLCollateFunction(nn.Module):
    """Implements the transformations for PIRL [0]. The jigsaw augmentation
    is applied during the forward pass.

    - [0] PIRL, 2019: https://arxiv.org/abs/1912.01991

    Attributes:
        input_size:
            Size of the input image in pixels.
        cj_prob:
            Probability that color jitter is applied.
        cj_bright:
            How much to jitter brightness.
        cj_contrast:
            How much to jitter constrast.
        cj_sat:
            How much to jitter saturation.
        cj_hue:
            How much to jitter hue.
        min_scale:
            Minimum size of the randomized crop relative to the input_size.
        random_gray_scale:
            Probability of conversion to grayscale.
        hf_prob:
            Probability that horizontal flip is applied.
        n_grid:
            Sqrt of the number of grids in the jigsaw image.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.

    Examples:
        >>> # PIRL for ImageNet
        >>> collate_fn = PIRLCollateFunction()
        >>>
        >>> # PIRL for CIFAR-10
        >>> collate_fn = PIRLCollateFunction(
        >>>     input_size=32,
        >>> )
    """

    def __init__(
        self,
        input_size: int = 64,
        cj_prob: float = 0.8,
        cj_bright: float = 0.4,
        cj_contrast: float = 0.4,
        cj_sat: float = 0.4,
        cj_hue: float = 0.4,
        min_scale: float = 0.08,
        random_gray_scale: float = 0.2,
        hf_prob: float = 0.5,
        n_grid: int = 3,
        normalize: dict = imagenet_normalize,
    ):
        _deprecation_warning_collate_functions()
        super().__init__()
        # The jigsaw helper needs a scalar image size.
        img_size = max(input_size) if isinstance(input_size, tuple) else input_size

        jitter = T.ColorJitter(cj_bright, cj_contrast, cj_sat, cj_hue)
        # Augmentations applied to the jigsaw tiles.
        tile_augmentations = [
            T.RandomHorizontalFlip(p=hf_prob),
            T.RandomApply([jitter], p=cj_prob),
            T.RandomGrayscale(p=random_gray_scale),
            T.ToTensor(),
        ]
        if normalize:
            tile_augmentations.append(
                T.Normalize(mean=normalize["mean"], std=normalize["std"])
            )

        # Only cropping and normalisation for the untransformed image.
        self.no_augment = T.Compose(
            [
                T.RandomResizedCrop(size=input_size, scale=(min_scale, 1.0)),
                T.ToTensor(),
                T.Normalize(mean=normalize["mean"], std=normalize["std"]),
            ]
        )
        self.jigsaw = Jigsaw(
            n_grid=n_grid,
            img_size=img_size,
            crop_size=int(img_size // n_grid),
            transform=T.Compose(tile_augmentations),
        )

    def forward(self, batch: List[tuple]):
        """Overrides BaseCollateFunction.forward because PIRL needs only one
        augmented (jigsaw) batch next to the untransformed batch, instead of
        two augmented views."""
        # Apply the jigsaw transform to every image first, then the plain
        # crop/normalize pass — this order matches the original RNG usage.
        jigsaw_views = [self.jigsaw(image).unsqueeze_(0) for image, _, _ in batch]
        plain_views = [self.no_augment(image).unsqueeze_(0) for image, _, _ in batch]

        labels = torch.LongTensor([label for _, label, _ in batch])
        fnames = [fname for _, _, fname in batch]

        # Untransformed batch first, jigsaw batch second.
        views = (torch.cat(plain_views, 0), torch.cat(jigsaw_views, 0))
        return views, labels, fnames
class MSNCollateFunction(MultiViewCollateFunction):
    """Implements the transformations for MSN [0].

    Generates a set of random and focal views for each input image. The generated output
    is (views, target, filenames) where views is list with the following entries:
    [random_views_0, random_views_1, ..., focal_views_0, focal_views_1, ...].

    - [0]: Masked Siamese Networks, 2022: https://arxiv.org/abs/2204.07141

    Attributes:
        random_size:
            Size of the random image views in pixels.
        focal_size:
            Size of the focal image views in pixels.
        random_views:
            Number of random views to generate.
        focal_views:
            Number of focal views to generate.
        random_crop_scale:
            Minimum and maximum size of the randomized crops for the relative to random_size.
        focal_crop_scale:
            Minimum and maximum size of the randomized crops relative to focal_size.
        cj_prob:
            Probability that color jittering is applied.
        cj_strength:
            Strength of the color jitter.
        gaussian_blur:
            Probability of Gaussian blur.
        kernel_size:
            Will be deprecated in favor of `sigmas` argument. If set, the old behavior applies and `sigmas` is ignored.
            Used to calculate sigma of gaussian blur with kernel_size * input_size.
        sigmas:
            Tuple of min and max value from which the std of the gaussian kernel is sampled.
            Is ignored if `kernel_size` is set.
        random_gray_scale:
            Probability of conversion to grayscale.
        hf_prob:
            Probability that horizontal flip is applied.
        vf_prob:
            Probability that vertical flip is applied.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
    """

    def __init__(
        self,
        random_size: int = 224,
        focal_size: int = 96,
        random_views: int = 2,
        focal_views: int = 10,
        random_crop_scale: Tuple[float, float] = (0.3, 1.0),
        focal_crop_scale: Tuple[float, float] = (0.05, 0.3),
        cj_prob: float = 0.8,
        cj_strength: float = 1.0,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.2, 2),
        random_gray_scale: float = 0.2,
        hf_prob: float = 0.5,
        vf_prob: float = 0.0,
        normalize: dict = imagenet_normalize,
    ) -> None:
        color_jitter = T.ColorJitter(
            brightness=0.8 * cj_strength,
            contrast=0.8 * cj_strength,
            saturation=0.8 * cj_strength,
            hue=0.2 * cj_strength,
        )

        def make_view_transform(size: int, scale: Tuple[float, float]) -> T.Compose:
            # Random and focal views share the exact same augmentation
            # pipeline and differ only in crop size and crop scale.
            return T.Compose(
                [
                    T.RandomResizedCrop(size=size, scale=scale),
                    T.RandomHorizontalFlip(p=hf_prob),
                    T.RandomVerticalFlip(p=vf_prob),
                    T.RandomApply([color_jitter], p=cj_prob),
                    T.RandomGrayscale(p=random_gray_scale),
                    GaussianBlur(
                        kernel_size=kernel_size, sigmas=sigmas, prob=gaussian_blur
                    ),
                    T.ToTensor(),
                    T.Normalize(mean=normalize["mean"], std=normalize["std"]),
                ]
            )

        random_view_transform = make_view_transform(random_size, random_crop_scale)
        focal_view_transform = make_view_transform(focal_size, focal_crop_scale)

        # View order: all random views first, then all focal views.
        view_transforms = [random_view_transform] * random_views
        view_transforms += [focal_view_transform] * focal_views
        super().__init__(transforms=view_transforms)
class SMoGCollateFunction(MultiViewCollateFunction):
    """Implements the transformations for SMoG.

    Attributes:
        crop_sizes:
            Size of the input image in pixels for each crop category.
        crop_counts:
            Number of crops for each crop category.
        crop_min_scales:
            Min scales for each crop category.
        crop_max_scales:
            Max_scales for each crop category.
        gaussian_blur_probs:
            Probability of Gaussian blur for each crop category.
        gaussian_blur_kernel_sizes:
            Deprecated values in favour of sigmas.
        gaussian_blur_sigmas:
            Tuple of min and max value from which the std of the gaussian kernel is sampled.
        solarize_probs:
            Probability of solarization for each crop category.
        hf_prob:
            Probability that horizontal flip is applied.
        cj_prob:
            Probability that color jitter is applied.
        cj_strength:
            Strength of the color jitter.
        random_gray_scale:
            Probability of conversion to grayscale.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
    """

    def __init__(
        self,
        crop_sizes: List[int] = [224, 96],
        crop_counts: List[int] = [4, 4],
        crop_min_scales: List[float] = [0.2, 0.05],
        crop_max_scales: List[float] = [1.0, 0.2],
        gaussian_blur_probs: List[float] = [0.5, 0.1],
        gaussian_blur_kernel_sizes: Optional[List[float]] = [None, None],
        gaussian_blur_sigmas: Tuple[float, float] = (0.2, 2),
        solarize_probs: List[float] = [0.0, 0.2],
        hf_prob: float = 0.5,
        cj_prob: float = 1.0,
        cj_strength: float = 0.5,
        random_gray_scale: float = 0.2,
        normalize: dict = imagenet_normalize,
    ):
        # The color jitter depends only on cj_strength, so build it once and
        # share it across all crop categories (hoisted out of the loop).
        # ColorJitter holds no state and re-samples its parameters on every
        # call, so sharing one instance is safe.
        color_jitter = T.ColorJitter(
            0.8 * cj_strength,
            0.8 * cj_strength,
            0.4 * cj_strength,
            0.2 * cj_strength,
        )
        transforms = []
        for i in range(len(crop_sizes)):
            random_resized_crop = T.RandomResizedCrop(
                crop_sizes[i], scale=(crop_min_scales[i], crop_max_scales[i])
            )
            # One shared Compose per category, repeated crop_counts[i] times.
            transforms.extend(
                [
                    T.Compose(
                        [
                            random_resized_crop,
                            T.RandomHorizontalFlip(p=hf_prob),
                            T.RandomApply([color_jitter], p=cj_prob),
                            T.RandomGrayscale(p=random_gray_scale),
                            GaussianBlur(
                                kernel_size=gaussian_blur_kernel_sizes[i],
                                prob=gaussian_blur_probs[i],
                                sigmas=gaussian_blur_sigmas,
                            ),
                            RandomSolarization(prob=solarize_probs[i]),
                            T.ToTensor(),
                            T.Normalize(mean=normalize["mean"], std=normalize["std"]),
                        ]
                    )
                ]
                * crop_counts[i]
            )
        super().__init__(transforms)
class VICRegCollateFunction(BaseCollateFunction):
    """Implementation of a collate function for images.

    This is an implementation of the BaseCollateFunction with a concrete
    set of transforms.

    The set of transforms is inspired by the SimCLR paper as it has shown
    to produce powerful embeddings.

    Attributes:
        input_size:
            Size of the input image in pixels.
        cj_prob:
            Probability that color jitter is applied.
        cj_bright:
            How much to jitter brightness.
        cj_contrast:
            How much to jitter constrast.
        cj_sat:
            How much to jitter saturation.
        cj_hue:
            How much to jitter hue.
        min_scale:
            Minimum size of the randomized crop relative to the input_size.
        random_gray_scale:
            Probability of conversion to grayscale.
        solarize_prob:
            Probability of solarization.
        gaussian_blur:
            Probability of Gaussian blur.
        kernel_size:
            Will be deprecated in favor of `sigmas` argument. If set, the old behavior applies and `sigmas` is ignored.
            Used to calculate sigma of gaussian blur with kernel_size * input_size.
        sigmas:
            Tuple of min and max value from which the std of the gaussian kernel is sampled.
            Is ignored if `kernel_size` is set.
        vf_prob:
            Probability that vertical flip is applied.
        hf_prob:
            Probability that horizontal flip is applied.
        rr_prob:
            Probability that random rotation is applied.
        rr_degrees:
            Range of degrees to select from for random rotation. If rr_degrees is None,
            images are rotated by 90 degrees. If rr_degrees is a (min, max) tuple,
            images are rotated by a random angle in [min, max]. If rr_degrees is a
            single number, images are rotated by a random angle in
            [-rr_degrees, +rr_degrees]. All rotations are counter-clockwise.
        normalize:
            Dictionary with 'mean' and 'std' for torchvision.transforms.Normalize.
    """

    def __init__(
        self,
        input_size: int = 224,
        cj_prob: float = 0.8,
        cj_bright: float = 0.4,
        cj_contrast: float = 0.4,
        cj_sat: float = 0.2,
        cj_hue: float = 0.1,
        min_scale: float = 0.08,
        random_gray_scale: float = 0.2,
        solarize_prob: float = 0.1,
        gaussian_blur: float = 0.5,
        kernel_size: Optional[float] = None,
        sigmas: Tuple[float, float] = (0.2, 2),
        vf_prob: float = 0.0,
        hf_prob: float = 0.5,
        rr_prob: float = 0.0,
        rr_degrees: Union[None, float, Tuple[float, float]] = None,
        normalize: dict = imagenet_normalize,
    ):
        # Removed dead code that reduced a tuple input_size to a scalar but
        # never used the result; T.RandomResizedCrop accepts both an int and
        # a (height, width) tuple directly.
        color_jitter = T.ColorJitter(cj_bright, cj_contrast, cj_sat, cj_hue)
        transform = [
            T.RandomResizedCrop(size=input_size, scale=(min_scale, 1.0)),
            random_rotation_transform(rr_prob=rr_prob, rr_degrees=rr_degrees),
            T.RandomHorizontalFlip(p=hf_prob),
            T.RandomVerticalFlip(p=vf_prob),
            T.RandomApply([color_jitter], p=cj_prob),
            T.RandomGrayscale(p=random_gray_scale),
            RandomSolarization(prob=solarize_prob),
            GaussianBlur(kernel_size=kernel_size, sigmas=sigmas, prob=gaussian_blur),
            T.ToTensor(),
        ]
        # Normalization is optional so callers can work with raw tensors.
        if normalize:
            transform += [T.Normalize(mean=normalize["mean"], std=normalize["std"])]
        transform = T.Compose(transform)
        super(VICRegCollateFunction, self).__init__(transform)
class VICRegLCollateFunction(nn.Module):
"""Transforms images for VICRegL.
Attributes:
global_crop_size:
Size of the input image in pixels for the global crop category.
local_crop_size:
Size of the input image in pixels for the local crop category.
global_crop_scale:
Min and max scales for the global crop category.
local_crop_scale:
Min and max scales for the local crop category.
global_grid_size:
Grid size for the global crop category.
local_grid_size:
Grid size for the local crop category.
global_gaussian_blur_prob:
Probability of Gaussian blur for the global crop category.
local_gaussian_blur_prob:
Probability of Gaussian blur for the local crop category.
global_gaussian_blur_kernel_size:
Will be deprecated in favor of `global_gaussian_blur_sigmas` argument. If set, the old behavior applies and `global_gaussian_blur_sigmas` is ignored.
Used to calculate sigma of gaussian blur with global_gaussian_blur_kernel_size * input_size. Applied to global crop category.
local_gaussian_blur_kernel_size:
Will be deprecated in favor of `local_gaussian_blur_sigmas` argument. If set, the old behavior applies and `local_gaussian_blur_sigmas` is ignored.
Used to calculate sigma of gaussian blur with local_gaussian_blur_kernel_size * input_size. Applied to local crop category.
global_gaussian_blur_sigmas:
Tuple of min and max value from which the std of the gaussian kernel is sampled.
Is ignored if `global_gaussian_blur_kernel_size` is set. Applied to global crop category.
local_gaussian_blur_sigmas:
Tuple of min and max value from which the std of the gaussian kernel is sampled.
Is ignored if `local_gaussian_blur_kernel_size` is set. Applied to local crop category.
global_solarize_prob:
Probability of solarization for the global crop category.
local_solarize_prob:
Probability of solarization for the local crop category.
hf_prob:
Probability that horizontal flip is applied.
cj_prob:
Probability that color jitter is applied.
cj_strength:
Strength of the color jitter.
random_gray_scale:
Probability of conversion to grayscale.
normalize:
Dictionary with mean and standard deviation for normalization.
"""
def __init__(
self,
global_crop_size: int = 224,
local_crop_size: int = 96,
global_crop_scale: Tuple[int] = (0.2, 1.0),
local_crop_scale: Tuple[int] = (0.05, 0.2),
global_grid_size: int = 7,
local_grid_size: int = 3,
global_gaussian_blur_prob: float = 0.5,
local_gaussian_blur_prob: float = 0.1,
global_gaussian_blur_kernel_size: Optional[float] = None,
local_gaussian_blur_kernel_size: Optional[float] = None,
global_gaussian_blur_sigmas: Tuple[float, float] = (0.2, 2),
local_gaussian_blur_sigmas: Tuple[float, float] = (0.2, 2),
global_solarize_prob: float = 0.0,
local_solarize_prob: float = 0.2,
hf_prob: float = 0.5,
cj_prob: float = 1.0,
cj_strength: float = 0.5,
random_gray_scale: float = 0.2,
normalize: dict = imagenet_normalize,
):
_deprecation_warning_collate_functions()
super().__init__()
self.global_crop_and_flip = RandomResizedCropAndFlip(
crop_size=global_crop_size,
crop_min_scale=global_crop_scale[0],
crop_max_scale=global_crop_scale[1],
hf_prob=hf_prob,
grid_size=global_grid_size,
)
self.local_crop_and_flip = RandomResizedCropAndFlip(
crop_size=local_crop_size,
crop_min_scale=local_crop_scale[0],
crop_max_scale=local_crop_scale[1],
hf_prob=hf_prob,
grid_size=local_grid_size,
)
color_jitter = T.ColorJitter(
0.8 * cj_strength,
0.8 * cj_strength,
0.4 * cj_strength,
0.2 * cj_strength,
)
self.global_transform = T.Compose(
[
T.RandomApply([color_jitter], p=cj_prob),
T.RandomGrayscale(p=random_gray_scale),
GaussianBlur(
kernel_size=global_gaussian_blur_kernel_size,
prob=global_gaussian_blur_prob,
sigmas=global_gaussian_blur_sigmas,
),
RandomSolarization(prob=global_solarize_prob),
T.ToTensor(),
T.Normalize(mean=normalize["mean"], std=normalize["std"]),
]
)
self.local_transform = T.Compose(
[
T.RandomApply([color_jitter], p=cj_prob),
T.RandomGrayscale(p=random_gray_scale),
GaussianBlur(
kernel_size=local_gaussian_blur_kernel_size,
prob=local_gaussian_blur_prob,
sigmas=local_gaussian_blur_sigmas,
),
RandomSolarization(prob=local_solarize_prob),
T.ToTensor(),
T.Normalize(mean=normalize["mean"], std=normalize["std"]),
]
)
def forward(
self, batch: List[Tuple[Image.Image, int, str]]
) -> Tuple[
Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
torch.Tensor,
torch.Tensor,
]:
"""
Applies transforms to images in the input batch.
Args:
batch:
A list of tuples containing an image (as a PIL Image),
a label (int), and a filename (str).
Returns:
A tuple of transformed images (as a 4-tuple of torch.Tensors containing view_global, view_local, grid_global, grid_local),
labels (as torch.Tensor), and filenames (as torch.Tensor).
"""
views_global = []
views_local = []
grids_global = []
grids_local = []
labels = []
fnames = []
for image, label, filename in batch:
view_global, grid_global = self.global_crop_and_flip.forward(image)
view_local, grid_local = self.local_crop_and_flip.forward(image)
views_global.append(self.global_transform(view_global))
views_local.append(self.local_transform(view_local))
grids_global.append(grid_global)
grids_local.append(grid_local)
labels.append(torch.LongTensor(label))
fnames.append(filename)
views_global = torch.stack(views_global)
views_local = torch.stack(views_local)
grids_global = torch.stack(grids_global)
grids_local = torch.stack(grids_local)
return (views_global, views_local, grids_global, grids_local), labels, fnames
class IJEPAMaskCollator:
    """Collator for IJEPA model [0].
    Experimental: Support for I-JEPA is experimental, there might be breaking changes
    in the future.
    Code inspired by [1].
    - [0]: Joint-Embedding Predictive Architecture, 2023, https://arxiv.org/abs/2301.08243
    - [1]: https://github.com/facebookresearch/ijepa
    """
    def __init__(
        self,
        input_size=(224, 224),
        patch_size=16,
        enc_mask_scale=(0.2, 0.8),
        pred_mask_scale=(0.2, 0.8),
        aspect_ratio=(0.3, 3.0),
        nenc=1,
        npred=2,
        min_keep=4,
        allow_overlap=False,
    ):
        """Initializes the mask collator.

        Args:
            input_size: Input image size in pixels, an int or (height, width) tuple.
            patch_size: Side length of a square patch in pixels.
            enc_mask_scale: Min/max fraction of patches covered by an encoder mask.
            pred_mask_scale: Min/max fraction of patches covered by a predictor mask.
            aspect_ratio: Min/max aspect ratio for sampled predictor blocks.
            nenc: Number of encoder masks per image.
            npred: Number of predictor masks per image.
            min_keep: Minimum number of patches a sampled mask must keep.
            allow_overlap: Whether encoder masks may overlap predictor masks.
        """
        if not isinstance(input_size, tuple):
            # A single int means a square input of that side length.
            input_size = (input_size,) * 2
        self.patch_size = patch_size
        # All mask sampling happens on the patch grid, not on raw pixels.
        self.height, self.width = (
            input_size[0] // patch_size,
            input_size[1] // patch_size,
        )
        self.enc_mask_scale = enc_mask_scale
        self.pred_mask_scale = pred_mask_scale
        self.aspect_ratio = aspect_ratio
        self.nenc = nenc
        self.npred = npred
        self.min_keep = min_keep  # minimum number of patches to keep
        self.allow_overlap = (
            allow_overlap  # whether to allow overlap b/w enc and pred masks
        )
        self._itr_counter = Value("i", -1)  # collator is shared across worker processes
    def step(self):
        """Atomically increments and returns the shared iteration counter.

        The counter lives in shared memory so every dataloader worker derives
        the same seed for a given batch.
        """
        i = self._itr_counter
        with i.get_lock():
            i.value += 1
            v = i.value
        return v
    def _sample_block_size(self, generator, scale, aspect_ratio_scale):
        """Samples a block (height, width) in patches from the given scale and
        aspect-ratio ranges, using a single random draw for both."""
        _rand = torch.rand(1, generator=generator).item()
        # -- Sample block scale
        min_s, max_s = scale
        mask_scale = min_s + _rand * (max_s - min_s)
        max_keep = int(self.height * self.width * mask_scale)
        # -- Sample block aspect-ratio
        min_ar, max_ar = aspect_ratio_scale
        aspect_ratio = min_ar + _rand * (max_ar - min_ar)
        # -- Compute block height and width (given scale and aspect-ratio)
        h = int(round(math.sqrt(max_keep * aspect_ratio)))
        w = int(round(math.sqrt(max_keep / aspect_ratio)))
        # Shrink the block until it fits strictly inside the patch grid.
        while h >= self.height:
            h -= 1
        while w >= self.width:
            w -= 1
        return (h, w)
    def _sample_block_mask(self, b_size, acceptable_regions=None):
        """Samples one rectangular block mask of size b_size on the patch grid.

        Returns:
            Tuple (mask, mask_complement): mask holds the flat indices of the
            kept patches; mask_complement is a (height, width) int tensor that
            is 0 inside the sampled block and 1 elsewhere.
        """
        h, w = b_size
        def constrain_mask(mask, tries=0):
            """Helper to restrict given mask to a set of acceptable regions"""
            # Each extra round of tries enforces one fewer region, gradually
            # relaxing the constraint until a valid mask can be found.
            N = max(int(len(acceptable_regions) - tries), 0)
            for k in range(N):
                mask *= acceptable_regions[k]
        # --
        # -- Loop to sample masks until we find a valid one
        tries = 0
        timeout = og_timeout = 20
        valid_mask = False
        while not valid_mask:
            # -- Sample block top-left corner
            top = torch.randint(0, self.height - h, (1,))
            left = torch.randint(0, self.width - w, (1,))
            mask = torch.zeros((self.height, self.width), dtype=torch.int32)
            mask[top : top + h, left : left + w] = 1
            # -- Constrain mask to a set of acceptable regions
            if acceptable_regions is not None:
                constrain_mask(mask, tries)
            mask = torch.nonzero(mask.flatten())
            # -- If mask too small try again
            valid_mask = len(mask) > self.min_keep
            if not valid_mask:
                # After og_timeout failed draws, relax the constraints by one
                # region (see constrain_mask) and reset the timeout.
                timeout -= 1
                if timeout == 0:
                    tries += 1
                    timeout = og_timeout
        mask = mask.squeeze()
        # --
        mask_complement = torch.ones((self.height, self.width), dtype=torch.int32)
        mask_complement[top : top + h, left : left + w] = 0
        # --
        return mask, mask_complement
    def __call__(self, batch):
        """
        Create encoder and predictor masks when collating imgs into a batch
        # 1. sample enc block (size + location) using seed
        # 2. sample pred block (size) using seed
        # 3. sample several enc block locations for each image (w/o seed)
        # 4. sample several pred block locations for each image (w/o seed)
        # 5. return enc mask and pred mask
        """
        B = len(batch)
        collated_batch = torch.utils.data.default_collate(batch)
        # Seed block-size sampling from the shared counter so all workers use
        # the same block sizes for this iteration.
        seed = self.step()
        g = torch.Generator()
        g.manual_seed(seed)
        p_size = self._sample_block_size(
            generator=g,
            scale=self.pred_mask_scale,
            aspect_ratio_scale=self.aspect_ratio,
        )
        e_size = self._sample_block_size(
            generator=g, scale=self.enc_mask_scale, aspect_ratio_scale=(1.0, 1.0)
        )
        collated_masks_pred, collated_masks_enc = [], []
        min_keep_pred = self.height * self.width
        min_keep_enc = self.height * self.width
        for _ in range(B):
            masks_p, masks_C = [], []
            for _ in range(self.npred):
                mask, mask_C = self._sample_block_mask(p_size)
                masks_p.append(mask)
                masks_C.append(mask_C)
                min_keep_pred = min(min_keep_pred, len(mask))
            collated_masks_pred.append(masks_p)
            # Unless overlap is allowed, encoder masks must avoid the patches
            # already claimed by this image's predictor masks.
            acceptable_regions = masks_C
            if self.allow_overlap:
                acceptable_regions = None
            masks_e = []
            for _ in range(self.nenc):
                mask, _ = self._sample_block_mask(
                    e_size, acceptable_regions=acceptable_regions
                )
                masks_e.append(mask)
                min_keep_enc = min(min_keep_enc, len(mask))
            collated_masks_enc.append(masks_e)
        # Truncate every mask to the shortest one in the batch so they can be
        # collated into rectangular tensors.
        collated_masks_pred = [
            [cm[:min_keep_pred] for cm in cm_list] for cm_list in collated_masks_pred
        ]
        collated_masks_pred = torch.utils.data.default_collate(collated_masks_pred)
        # --
        collated_masks_enc = [
            [cm[:min_keep_enc] for cm in cm_list] for cm_list in collated_masks_enc
        ]
        collated_masks_enc = torch.utils.data.default_collate(collated_masks_enc)
        return collated_batch, collated_masks_enc, collated_masks_pred
def _deprecation_warning_collate_functions() -> None:
warn(
"Collate functions are deprecated and will be removed in favor of transforms in v1.4.0.\n"
"See https://docs.lightly.ai/self-supervised-learning/examples/models.html for examples.",
category=DeprecationWarning,
)
| 55,832 | 35.587811 | 161 | py |
lightly | lightly-master/lightly/data/dataset.py | # Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import bisect
import os
import shutil
import tempfile
from typing import Any, Callable, Dict, List, Union
import torchvision.datasets as datasets
from PIL import Image
from torch._C import Value
from torchvision import transforms
from torchvision.datasets.vision import StandardTransform, VisionDataset
from lightly.data._helpers import DatasetFolder, _load_dataset_from_folder
from lightly.data._video import VideoDataset
from lightly.utils.io import check_filenames
def _get_filename_by_index(dataset, index):
    """Default mapping from a sample index to its filename.

    Folder-backed datasets report the image path relative to the dataset
    root; video datasets construct the name themselves; any other dataset
    falls back to the stringified index.
    """
    if isinstance(dataset, datasets.ImageFolder):
        # Path of the image relative to the dataset root.
        path, _ = dataset.imgs[index]
        return os.path.relpath(path, dataset.root)
    if isinstance(dataset, DatasetFolder):
        # Path of the image relative to the dataset root.
        path, _ = dataset.samples[index]
        return os.path.relpath(path, dataset.root)
    if isinstance(dataset, VideoDataset):
        # The video dataset builds the filename from video name and frame.
        return dataset.get_filename(index)
    # Dummy fallback so unknown dataset types never crash.
    return str(index)
def _ensure_dir(path):
"""Makes sure that the directory at path exists."""
dirname = os.path.dirname(path)
os.makedirs(dirname, exist_ok=True)
def _copy_image(input_dir, output_dir, filename):
"""Copies an image from the input directory to the output directory."""
source = os.path.join(input_dir, filename)
target = os.path.join(output_dir, filename)
_ensure_dir(target)
shutil.copyfile(source, target)
def _save_image(image, output_dir, filename, fmt):
"""Saves an image in the output directory."""
target = os.path.join(output_dir, filename)
_ensure_dir(target)
try:
# try to save the image with the specified format or
# derive the format from the filename (if format=None)
image.save(target, format=fmt)
except ValueError:
# could not determine format from filename
image.save(target, format="png")
def _dump_image(dataset, output_dir, filename, index, fmt):
    """Writes the image at ``index`` to the output directory.

    Folder-backed datasets allow copying the file straight from the input
    directory. All other datasets (e.g. VideoDatasets) require loading the
    image and re-encoding it with the specified format.
    """
    if isinstance(dataset, (datasets.ImageFolder, DatasetFolder)):
        # The source file exists on disk, so a plain copy is safe.
        _copy_image(dataset.root, output_dir, filename)
    else:
        # Load the sample and encode it into the output directory.
        image, _ = dataset[index]
        _save_image(image, output_dir, filename, fmt)
class LightlyDataset:
    """Provides a uniform data interface for the embedding models.
    Should be used for all models and functions in the lightly package.
    Returns a tuple (sample, target, fname) when accessed using __getitem__.
    The LightlyDataset supports different input sources. You can use it
    on a folder of images. You can also use it on a folder with subfolders
    with images (ImageNet style). If the input_dir has subfolders,
    each subfolder gets its own target label.
    You can also work with videos (requires pyav).
    If there are multiple videos in the input_dir each video gets a different
    target label assigned. If input_dir contains images and videos
    only the videos are used.
    Can also be used in combination with the `from_torch_dataset` method
    to load a dataset offered by torchvision (e.g. cifar10).
    Parameters:
        input_dir:
            Path to directory holding the images or videos to load.
        transform:
            Image transforms (as in torchvision).
        index_to_filename:
            Function which takes the dataset and index as input and returns
            the filename of the file at the index. If None, uses default.
        filenames:
            If not None, it filters the dataset in the input directory
            by the given filenames.
    Examples:
        >>> # load a dataset consisting of images from a local folder
        >>> # mydata/
        >>> # `- img1.png
        >>> # `- img2.png
        >>> # `- ...
        >>> import lightly.data as data
        >>> dataset = data.LightlyDataset(input_dir='path/to/mydata/')
        >>> sample, target, fname = dataset[0]
        >>>
        >>> # also works with subfolders
        >>> # mydata/
        >>> # `- subfolder1
        >>> # `- img1.png
        >>> # `- subfolder2
        >>> # ...
        >>>
        >>> # also works with videos
        >>> # mydata/
        >>> # `- video1.mp4
        >>> # `- video2.mp4
        >>> # `- ...
    """
    def __init__(
        self,
        input_dir: Union[str, None],
        transform: transforms.Compose = None,
        index_to_filename: Callable[[datasets.VisionDataset, int], str] = None,
        filenames: List[str] = None,
        tqdm_args: Dict[str, Any] = None,
        num_workers_video_frame_counting: int = 0,
    ):
        # can pass input_dir=None to create an "empty" dataset
        self.input_dir = input_dir
        if filenames is not None:
            # Restrict the dataset to the given filenames via an O(1)
            # membership test on full file paths.
            filepaths = [os.path.join(input_dir, filename) for filename in filenames]
            filepaths = set(filepaths)
            def is_valid_file(filepath: str):
                # Closure over the precomputed set of allowed file paths.
                return filepath in filepaths
        else:
            is_valid_file = None
        if self.input_dir is not None:
            self.dataset = _load_dataset_from_folder(
                self.input_dir,
                transform,
                is_valid_file=is_valid_file,
                tqdm_args=tqdm_args,
                num_workers_video_frame_counting=num_workers_video_frame_counting,
            )
        elif transform is not None:
            # A transform without an input directory cannot be applied; the
            # wrapped dataset (set later via from_torch_dataset) owns it.
            raise ValueError(
                "transform must be None when input_dir is None but is " f"{transform}",
            )
        # initialize function to get filename of image
        self.index_to_filename = _get_filename_by_index
        if index_to_filename is not None:
            self.index_to_filename = index_to_filename
        # if created from an input directory with filenames, check if they
        # are valid
        if input_dir:
            check_filenames(self.get_filenames())
    @classmethod
    def from_torch_dataset(cls, dataset, transform=None, index_to_filename=None):
        """Builds a LightlyDataset from a PyTorch (or torchvision) dataset.
        Args:
            dataset:
                PyTorch/torchvision dataset.
            transform:
                Image transforms (as in torchvision).
            index_to_filename:
                Function which takes the dataset and index as input and returns
                the filename of the file at the index. If None, uses default.
        Returns:
            A LightlyDataset object.
        Examples:
            >>> # load cifar10 from torchvision
            >>> import torchvision
            >>> import lightly.data as data
            >>> base = torchvision.datasets.CIFAR10(root='./')
            >>> dataset = data.LightlyDataset.from_torch_dataset(base)
        """
        # create an "empty" dataset object
        dataset_obj = cls(
            None,
            index_to_filename=index_to_filename,
        )
        # populate it with the torch dataset
        if transform is not None:
            dataset.transform = transform
            # If dataset is a VisionDataset, we need to initialize transforms, too.
            if isinstance(dataset, VisionDataset):
                dataset.transforms = StandardTransform(
                    transform, dataset.target_transform
                )
        dataset_obj.dataset = dataset
        return dataset_obj
    def __getitem__(self, index: int):
        """Returns (sample, target, fname) of item at index.
        Args:
            index:
                Index of the queried item.
        Returns:
            The image, target, and filename of the item at index.
        """
        fname = self.index_to_filename(self.dataset, index)
        sample, target = self.dataset.__getitem__(index)
        return sample, target, fname
    def __len__(self):
        """Returns the length of the dataset."""
        return len(self.dataset)
    def __add__(self, other):
        """Adds another item to the dataset."""
        raise NotImplementedError()
    def get_filenames(self) -> List[str]:
        """Returns all filenames in the dataset."""
        # Prefer the wrapped dataset's own (possibly cached) implementation.
        if hasattr(self.dataset, "get_filenames"):
            return self.dataset.get_filenames()
        list_of_filenames = []
        for index in range(len(self)):
            fname = self.index_to_filename(self.dataset, index)
            list_of_filenames.append(fname)
        return list_of_filenames
    def dump(
        self,
        output_dir: str,
        filenames: Union[List[str], None] = None,
        format: Union[str, None] = None,
    ):
        """Saves images in the dataset to the output directory.
        Will copy the images from the input directory to the output directory
        if possible. If not (e.g. for VideoDatasets), will load the images and
        then save them to the output directory with the specified format.
        Args:
            output_dir:
                Output directory where the image is stored.
            filenames:
                Filenames of the images to store. If None, stores all images.
            format:
                Image format. Can be any pillow image format (png, jpg, ...).
                By default we try to use the same format as the input data. If
                not possible (e.g. for videos) we dump the image
                as a png image to prevent compression artifacts.
        """
        if self.dataset.transform is not None:
            # Dumping transformed images would silently corrupt the output.
            raise RuntimeError("Cannot dump dataset which applies transforms!")
        # create directory if it doesn't exist yet
        os.makedirs(output_dir, exist_ok=True)
        # dump all the files if no filenames were passed, otherwise dump only
        # the ones referenced in the list
        if filenames is None:
            indices = [i for i in range(self.__len__())]
            filenames = self.get_filenames()
        else:
            indices = []
            filenames = sorted(filenames)
            all_filenames = self.get_filenames()
            # Binary search each dataset filename in the sorted request list.
            for index, filename in enumerate(all_filenames):
                filename_index = bisect.bisect_left(filenames, filename)
                # make sure the filename exists in filenames
                if (
                    filename_index < len(filenames)
                    and filenames[filename_index] == filename
                ):
                    indices.append(index)
        # dump images
        # NOTE(review): zipping assumes `indices` (in get_filenames() order)
        # and the sorted `filenames` align pairwise; this holds when
        # get_filenames() returns sorted names — confirm for video datasets.
        for i, filename in zip(indices, filenames):
            _dump_image(self.dataset, output_dir, filename, i, fmt=format)
    def get_filepath_from_filename(self, filename: str, image: Image = None):
        """Returns the filepath given the filename of the image
        There are three cases:
        - The dataset is a regular dataset with the images in the input dir.
        - The dataset is a video dataset, thus the images have to be saved in a
        temporary folder.
        - The dataset is a torch dataset, thus the images have to be saved in a
        temporary folder.
        Args:
            filename:
                The filename of the image
            image:
                The image corresponding to the filename
        Returns:
            The filename to the image, either the existing one (case 1) or a
            newly created jpg (case 2, 3)
        """
        has_input_dir = hasattr(self, "input_dir") and isinstance(self.input_dir, str)
        if has_input_dir:
            path_to_image = os.path.join(self.input_dir, filename)
            if os.path.isfile(path_to_image):
                # the file exists, return its filepath
                return path_to_image
        if image is None:
            raise ValueError(
                "The parameter image must not be None for"
                "VideoDatasets and TorchDatasets"
            )
        # the file doesn't exist, save it as a jpg and return filepath
        # NOTE(review): a fresh temporary directory is created per call and is
        # never cleaned up here — callers own the lifetime of these files.
        folder_path = tempfile.mkdtemp()
        filepath = os.path.join(folder_path, filename) + ".jpg"
        if os.path.dirname(filepath):
            os.makedirs(os.path.dirname(filepath), exist_ok=True)
        image.save(filepath)
        return filepath
    @property
    def transform(self):
        """Getter for the transform of the dataset."""
        return self.dataset.transform
    @transform.setter
    def transform(self, t):
        """Setter for the transform of the dataset."""
        self.dataset.transform = t
| 13,255 | 35.021739 | 87 | py |
lightly | lightly-master/lightly/data/lightly_subset.py | from typing import Dict, List, Tuple
from lightly.data.dataset import LightlyDataset
class LightlySubset(LightlyDataset):
    def __init__(self, base_dataset: LightlyDataset, filenames_subset: List[str]):
        """Creates a subset of a LightlyDataset.
        Args:
            base_dataset:
                The dataset to subset from.
            filenames_subset:
                The filenames of the samples to be part of the subset.
        """
        self.base_dataset = base_dataset
        self.filenames_subset = filenames_subset
        # Build a filename -> base index lookup once so subset indices can be
        # translated in O(1) during __getitem__.
        dict_base_dataset_filename_index: Dict[str, int] = dict()
        for index in range(len(base_dataset)):
            fname = base_dataset.index_to_filename(self.dataset, index)
            dict_base_dataset_filename_index[fname] = index
        # Raises KeyError if a requested filename is not in the base dataset.
        self.mapping_subset_index_to_baseset_index = [
            dict_base_dataset_filename_index[filename] for filename in filenames_subset
        ]
    def __getitem__(self, index_subset: int) -> Tuple[object, object, str]:
        """An overwrite for indexing.
        Args:
            index_subset:
                The index of a sample w.r.t. to the subset.
                E.g. if index_subset == 0, the sample belonging to
                the first filename in self.filenames_subset is returned.
        Returns:
            A tuple of the sample, its target and its filename.
        """
        index_baseset = self.mapping_subset_index_to_baseset_index[index_subset]
        sample, target, fname = self.base_dataset.__getitem__(index_baseset)
        return sample, target, fname
    def __len__(self) -> int:
        """Overwrites the len(...) function.
        Returns:
            The number of samples in the subset.
        """
        return len(self.filenames_subset)
    def get_filenames(self) -> List[str]:
        """Returns all filenames in the subset."""
        return self.filenames_subset
    def index_to_filename(self, dataset, index_subset: int):
        """Maps from an index of a sample to its filename.
        Args:
            dataset:
                Unused, but specified by the overwritten
                function of the parent class.
            index_subset:
                The index of the sample w.r.t. the subset.
        Returns:
            The filename of the sample.
        """
        fname = self.filenames_subset[index_subset]
        return fname
    @property
    def input_dir(self):
        """The input directory of the wrapped base dataset."""
        return self.base_dataset.input_dir
    @property
    def dataset(self):
        """The underlying (torch) dataset of the wrapped base dataset."""
        return self.base_dataset.dataset
| 2,569 | 31.125 | 87 | py |
lightly | lightly-master/lightly/data/multi_view_collate.py | from typing import List, Tuple
from warnings import warn
import torch
from torch import Tensor
class MultiViewCollate:
    """Collate function that combines views from multiple images into a batch.

    Example:
        >>> transform = SimCLRTransform()
        >>> dataset = LightlyDataset(input_dir, transform=transform)
        >>> dataloader = DataLoader(dataset, batch_size=4, collate_fn=MultiViewCollate())
        >>> for views, targets, filenames in dataloader:
        >>>     view0, view1 = views # each view is a tensor of shape (batch_size, channels, height, width)
        >>>
    """

    def __call__(
        self, batch: List[Tuple[List[Tensor], int, str]]
    ) -> Tuple[List[Tensor], Tensor, List[str]]:
        """Turns a batch of (views, label, filename) tuples into single
        (views, labels, filenames) tuple.

        Args:
            batch:
                The input batch as a list of (views, label, filename) tuples, one for
                each image in the batch. In particular, views is a list of N view
                tensors. Every view tensor is a transformed version of the original
                image. Label and filename are the class label and filename of the
                corresponding image.

                Example:
                    >>> batch = [
                    >>>     ([img_0_view_0, ..., img_0_view_N], label_0, filename_0),   # image 0
                    >>>     ([img_1_view_0, ..., img_1_view_N], label_1, filename_1),   # image 1
                    >>>     ...
                    >>>     ([img_B_view_0, ..., img_B_view_N], label_B, filename_B]),  # image B
                    >>> ]

        Returns:
            A (views, labels, filenames) tuple. Views is a list of tensors with each
            tensor containing one view for every image in the batch.

            Example:
                >>> output = (
                >>>     [
                >>>         Tensor([img_0_view_0, ..., img_B_view_0]),    # view 0
                >>>         Tensor([img_0_view_1, ..., img_B_view_1]),    # view 1
                >>>         ...
                >>>         Tensor([img_0_view_N, ..., img_B_view_N]),    # view N
                >>>     ],
                >>>     [label_0, ..., label_B],
                >>>     [filename_0, ..., filename_B],
                >>> )
        """
        if len(batch) == 0:
            warn("MultiViewCollate received empty batch.")
            # Return an empty long tensor for the labels so the return type
            # matches the annotated signature even for empty batches.
            return [], torch.tensor([], dtype=torch.long), []

        # One accumulator list per view; every image contributes one entry.
        views = [[] for _ in range(len(batch[0][0]))]
        labels = []
        fnames = []
        for img, label, fname in batch:
            for i, view in enumerate(img):
                views[i].append(view.unsqueeze(0))
            labels.append(label)
            fnames.append(fname)
        # Concatenate each view's entries along the new batch dimension.
        for i, view in enumerate(views):
            views[i] = torch.cat(view)

        labels = torch.tensor(
            labels, dtype=torch.long
        )  # Conversion to tensor to ensure backwards compatibility

        return views, labels, fnames
| 3,022 | 37.75641 | 108 | py |
lightly | lightly-master/lightly/embedding/__init__.py | """The lightly.embedding module provides trainable embedding strategies.
The embedding models use a pre-trained ResNet but should be finetuned on each
dataset instance.
"""
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from lightly.embedding._base import BaseEmbedding
from lightly.embedding.embedding import SelfSupervisedEmbedding
| 366 | 27.230769 | 77 | py |
lightly | lightly-master/lightly/embedding/_base.py | """ BaseEmbeddings """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import copy
import os
import omegaconf
from omegaconf import DictConfig
from pytorch_lightning import LightningModule, Trainer
from lightly.embedding import callbacks
class BaseEmbedding(LightningModule):
    """All trainable embeddings must inherit from BaseEmbedding."""
    def __init__(self, model, criterion, optimizer, dataloader, scheduler=None):
        """Constructor
        Args:
            model: (torch.nn.Module)
            criterion: (torch.nn.Module)
            optimizer: (torch.optim.Optimizer)
            dataloader: (torch.utils.data.DataLoader)
        """
        super(BaseEmbedding, self).__init__()
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.dataloader = dataloader
        self.scheduler = scheduler
        # Absolute path to the best checkpoint; set by train_embedding().
        self.checkpoint = None
        # Remember the working directory so checkpoint paths can be resolved
        # later, even if the trainer changes directories.
        self.cwd = os.getcwd()
    def forward(self, x0, x1):
        # Forward both augmented views through the wrapped model.
        return self.model(x0, x1)
    def training_step(self, batch, batch_idx):
        # get the two image transformations
        (x0, x1), _, _ = batch
        # forward pass of the transformations
        y0, y1 = self(x0, x1)
        # calculate loss
        loss = self.criterion(y0, y1)
        # log loss and return
        self.log("loss", loss)
        return loss
    def configure_optimizers(self):
        # Lightning accepts either a bare optimizer or an
        # (optimizers, schedulers) pair of lists.
        if self.scheduler is None:
            return self.optimizer
        else:
            return [self.optimizer], [self.scheduler]
    def train_dataloader(self):
        return self.dataloader
    def train_embedding(
        self,
        trainer_config: DictConfig,
        checkpoint_callback_config: DictConfig,
        summary_callback_config: DictConfig,
    ):
        """Train the model on the provided dataset.
        Args:
            trainer_config: pylightning_trainer arguments, examples include:
                min_epochs: (int) Minimum number of epochs to train
                max_epochs: (int) Maximum number of epochs to train
                gpus: (int) Number of gpus to use
                enable_model_summary: (bool) Whether to enable model summarisation.
                weights_summary: (str) DEPRECATED. How to print a summary of the model and weights.
            checkpoint_callback_config: ModelCheckpoint callback arguments
            summary_callback_config: ModelSummary callback arguments
        Returns:
            A trained encoder, ready for embedding datasets.
        """
        trainer_callbacks = []
        checkpoint_cb = callbacks.create_checkpoint_callback(
            **checkpoint_callback_config
        )
        trainer_callbacks.append(checkpoint_cb)
        summary_cb = callbacks.create_summary_callback(
            summary_callback_config=summary_callback_config,
            trainer_config=trainer_config,
        )
        if summary_cb is not None:
            trainer_callbacks.append(summary_cb)
        # Remove weights_summary from trainer_config now that the summary callback
        # has been created. TODO: Drop support for the "weights_summary" argument.
        trainer_config_copy = copy.deepcopy(trainer_config)
        if "weights_summary" in trainer_config_copy:
            # open_dict temporarily lifts OmegaConf's struct flag so the
            # deprecated key can be deleted.
            with omegaconf.open_dict(trainer_config_copy):
                del trainer_config_copy["weights_summary"]
        trainer = Trainer(**trainer_config_copy, callbacks=trainer_callbacks)
        trainer.fit(self)
        # An empty best_model_path means no checkpoint was written.
        if checkpoint_cb.best_model_path != "":
            self.checkpoint = os.path.join(self.cwd, checkpoint_cb.best_model_path)
    def embed(self, *args, **kwargs):
        """Must be implemented by classes which inherit from BaseEmbedding."""
        raise NotImplementedError()
| 3,778 | 32.149123 | 99 | py |
lightly | lightly-master/lightly/embedding/callbacks.py | import os
from omegaconf import DictConfig
from pytorch_lightning.callbacks import ModelCheckpoint, ModelSummary
from lightly.utils.hipify import print_as_warning
def create_checkpoint_callback(
    save_last=False,
    save_top_k=0,
    monitor="loss",
    dirpath=None,
) -> ModelCheckpoint:
    """Initializes the checkpoint callback.

    Args:
        save_last:
            Whether or not to save the checkpoint of the last epoch.
        save_top_k:
            Save the top_k model checkpoints.
        monitor:
            Which quantity to monitor.
        dirpath:
            Where to save the checkpoint; defaults to the current working
            directory.
    """
    if dirpath is None:
        dirpath = os.getcwd()
    return ModelCheckpoint(
        dirpath=dirpath,
        filename="lightly_epoch_{epoch:d}",
        save_last=save_last,
        save_top_k=save_top_k,
        monitor=monitor,
        auto_insert_metric_name=False,
    )
def create_summary_callback(
    summary_callback_config: DictConfig, trainer_config: DictConfig
) -> ModelSummary:
    """Creates a summary callback.

    Supports both the new ``summary_callback.max_depth`` configuration and the
    deprecated ``trainer.weights_summary`` argument.
    """
    # TODO: Drop support for the "weights_summary" argument.
    weights_summary = trainer_config.get("weights_summary", None)
    if weights_summary in (None, "None"):
        return _create_summary_callback(**summary_callback_config)
    # Legacy path: translate the deprecated trainer argument.
    return _create_summary_callback_deprecated(weights_summary)
def _create_summary_callback(max_depth: int) -> ModelSummary:
    """Builds the model summary callback.

    See the `ModelSummary reference documentation
    <https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.callbacks.ModelSummary.html?highlight=ModelSummary>`.

    Args:
        max_depth:
            Maximum depth of layer nesting that the summary will include.
    """
    return ModelSummary(max_depth=max_depth)
def _create_summary_callback_deprecated(weights_summary: str) -> ModelSummary:
    """Constructs summary callback from the deprecated ``weights_summary`` argument.

    The ``weights_summary`` trainer argument was deprecated with the release
    of pytorch lightning 1.7 in 08/2022. Support for this will be removed
    in the future.

    Args:
        weights_summary:
            The legacy value, either "top" or "full".

    Raises:
        ValueError: If ``weights_summary`` is neither "top" nor "full".
    """
    # Fixed message: it previously pointed users at 'trainer.weights_summary'
    # (the deprecated parameter itself) and 'checkpoint_callback.max_depth'
    # (max_depth belongs to the summary callback config, not the checkpoint
    # callback).
    print_as_warning(
        "The configuration parameter 'trainer.weights_summary' is deprecated."
        " Please use 'trainer.enable_model_summary: True' and set"
        " 'summary_callback.max_depth' to value 1 for the option 'top'"
        " or -1 for the option 'full'."
    )
    if weights_summary == "top":
        # "top" only summarizes the top-level modules.
        max_depth = 1
    elif weights_summary == "full":
        # "full" recurses through all submodules.
        max_depth = -1
    else:
        raise ValueError(
            "Invalid value for the deprecated trainer.weights_summary"
            " configuration parameter."
        )
    return _create_summary_callback(max_depth=max_depth)
| 2,775 | 31.658824 | 131 | py |
lightly | lightly-master/lightly/embedding/embedding.py | """ Embedding Strategies """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import time
from typing import List, Tuple, Union
import numpy as np
import torch
from tqdm import tqdm
import lightly
from lightly.embedding._base import BaseEmbedding
from lightly.utils.reordering import sort_items_by_keys
class SelfSupervisedEmbedding(BaseEmbedding):
    """Implementation of self-supervised embedding models.

    Implements an embedding strategy based on self-supervised learning. A
    model backbone, self-supervised criterion, optimizer, and dataloader are
    passed to the constructor. The embedding itself is a pytorch-lightning
    module.

    The implementation is based on contrastive learning.

    * SimCLR: https://arxiv.org/abs/2002.05709
    * MoCo: https://arxiv.org/abs/1911.05722
    * SimSiam: https://arxiv.org/abs/2011.10566

    Attributes:
        model:
            A backbone convolutional network with a projection head.
        criterion:
            A contrastive loss function.
        optimizer:
            A PyTorch optimizer.
        dataloader:
            A torchvision dataloader.
        scheduler:
            A PyTorch learning rate scheduler.

    Examples:
        >>> # define a model, criterion, optimizer, and dataloader above
        >>> import lightly.embedding as embedding
        >>> encoder = SelfSupervisedEmbedding(
        >>>     model,
        >>>     criterion,
        >>>     optimizer,
        >>>     dataloader,
        >>> )
        >>> # train the self-supervised embedding with default settings
        >>> encoder.train_embedding()
        >>> # pass pytorch-lightning trainer arguments as kwargs
        >>> encoder.train_embedding(max_epochs=10)

    """

    def __init__(
        self,
        model: torch.nn.Module,
        criterion: torch.nn.Module,
        optimizer: torch.optim.Optimizer,
        dataloader: torch.utils.data.DataLoader,
        scheduler=None,
    ):
        # All training logic lives in BaseEmbedding; this class only adds embed().
        super(SelfSupervisedEmbedding, self).__init__(
            model, criterion, optimizer, dataloader, scheduler
        )

    def embed(
        self, dataloader: torch.utils.data.DataLoader, device: torch.device = None
    ) -> Tuple[np.ndarray, List[int], List[str]]:
        """Embeds images in a vector space.

        Args:
            dataloader:
                A PyTorch dataloader.
            device:
                Selected device (`cpu`, `cuda`, see PyTorch documentation).
                If None, tensors are not moved to a specific device.

        Returns:
            Tuple of (embeddings, labels, filenames) ordered by the
            samples in the dataset of the dataloader.
            embeddings:
                Embedding of shape (n_samples, embedding_feature_size).
                One embedding for each sample.
            labels:
                Labels of shape (n_samples, ).
            filenames:
                The filenames from dataloader.dataset.get_filenames().

        Examples:
            >>> # embed images in vector space
            >>> embeddings, labels, fnames = encoder.embed(dataloader)

        """
        # Inference mode: no dropout/batch-norm updates; gradients disabled below.
        self.model.eval()
        embeddings, labels, filenames = None, None, []

        dataset = dataloader.dataset
        # Progress bar counts individual images, not batches.
        pbar = tqdm(total=len(dataset), unit="imgs")

        # Fraction of each batch's wall time spent on inference (vs. data loading).
        efficiency = 0.0
        embeddings = []
        labels = []
        with torch.no_grad():
            start_timepoint = time.time()
            for image_batch, label_batch, filename_batch in dataloader:
                batch_size = image_batch.shape[0]

                # the following 2 lines are needed to prevent a file handler leak,
                # see https://github.com/lightly-ai/lightly/pull/676
                image_batch = image_batch.to(device)
                label_batch = label_batch.clone()

                filenames += [*filename_batch]

                prepared_timepoint = time.time()

                embedding_batch = self.model.backbone(image_batch)
                # Flatten all feature dimensions into a single embedding vector.
                embedding_batch = embedding_batch.detach().reshape(batch_size, -1)

                embeddings.append(embedding_batch)
                labels.append(label_batch)

                finished_timepoint = time.time()

                data_loading_time = prepared_timepoint - start_timepoint
                inference_time = finished_timepoint - prepared_timepoint
                total_batch_time = data_loading_time + inference_time

                efficiency = inference_time / total_batch_time
                pbar.set_description("Compute efficiency: {:.2f}".format(efficiency))
                start_timepoint = time.time()

                pbar.update(batch_size)

        embeddings = torch.cat(embeddings, 0)
        labels = torch.cat(labels, 0)

        embeddings = embeddings.cpu().numpy()
        labels = labels.cpu().numpy()

        # The dataloader may yield samples in a different order than the dataset;
        # reorder results so they match dataset.get_filenames().
        sorted_filenames = dataset.get_filenames()
        sorted_embeddings = sort_items_by_keys(filenames, embeddings, sorted_filenames)
        sorted_labels = sort_items_by_keys(filenames, labels, sorted_filenames)
        embeddings = np.stack(sorted_embeddings)
        labels = np.stack(sorted_labels).tolist()
        return embeddings, labels, sorted_filenames
| 5,193 | 32.294872 | 87 | py |
lightly | lightly-master/lightly/loss/__init__.py | """The lightly.loss package provides loss functions for self-supervised learning. """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from lightly.loss.barlow_twins_loss import BarlowTwinsLoss
from lightly.loss.dcl_loss import DCLLoss, DCLWLoss
from lightly.loss.dino_loss import DINOLoss
from lightly.loss.msn_loss import MSNLoss
from lightly.loss.negative_cosine_similarity import NegativeCosineSimilarity
from lightly.loss.ntx_ent_loss import NTXentLoss
from lightly.loss.pmsn_loss import PMSNCustomLoss, PMSNLoss
from lightly.loss.swav_loss import SwaVLoss
from lightly.loss.sym_neg_cos_sim_loss import SymNegCosineSimilarityLoss
from lightly.loss.tico_loss import TiCoLoss
from lightly.loss.vicreg_loss import VICRegLoss
from lightly.loss.vicregl_loss import VICRegLLoss
| 805 | 43.777778 | 85 | py |
lightly | lightly-master/lightly/loss/barlow_twins_loss.py | import torch
import torch.distributed as dist
class BarlowTwinsLoss(torch.nn.Module):
    """Implementation of the Barlow Twins Loss from Barlow Twins[0] paper.

    This code specifically implements the Figure Algorithm 1 from [0].

    [0] Zbontar,J. et.al, 2021, Barlow Twins... https://arxiv.org/abs/2103.03230

    Examples:
        >>> # initialize loss function
        >>> loss_fn = BarlowTwinsLoss()
        >>>
        >>> # generate two random transforms of images
        >>> t0 = transforms(images)
        >>> t1 = transforms(images)
        >>>
        >>> # feed through SimSiam model
        >>> out0, out1 = model(t0, t1)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(out0, out1)

    """

    def __init__(self, lambda_param: float = 5e-3, gather_distributed: bool = False):
        """Lambda param configuration with default value like in [0]

        Args:
            lambda_param:
                Parameter for importance of redundancy reduction term.
                Defaults to 5e-3 [0].
            gather_distributed:
                If True then the cross-correlation matrices from all gpus are
                gathered and summed before the loss calculation.
        """
        super(BarlowTwinsLoss, self).__init__()
        self.lambda_param = lambda_param
        self.gather_distributed = gather_distributed

        if gather_distributed and not dist.is_available():
            raise ValueError(
                "gather_distributed is True but torch.distributed is not available. "
                "Please set gather_distributed=False or install a torch version with "
                "distributed support."
            )

    def forward(self, z_a: torch.Tensor, z_b: torch.Tensor) -> torch.Tensor:
        """Computes the Barlow Twins loss for the two views z_a and z_b.

        Args:
            z_a:
                Projections from the first view, shape (batch_size, dim).
            z_b:
                Projections from the second view, shape (batch_size, dim).

        Returns:
            Scalar loss: invariance term (diagonal) plus lambda-weighted
            redundancy reduction term (off-diagonal).
        """
        device = z_a.device

        # normalize repr. along the batch dimension
        z_a_norm = (z_a - z_a.mean(0)) / z_a.std(0)  # NxD
        z_b_norm = (z_b - z_b.mean(0)) / z_b.std(0)  # NxD

        N = z_a.size(0)
        D = z_a.size(1)

        # cross-correlation matrix
        c = torch.mm(z_a_norm.T, z_b_norm) / N  # DxD

        # sum cross-correlation matrix between multiple gpus
        if self.gather_distributed and dist.is_initialized():
            world_size = dist.get_world_size()
            if world_size > 1:
                c = c / world_size
                dist.all_reduce(c)

        # loss
        c_diff = (c - torch.eye(D, device=device)).pow(2)  # DxD
        # multiply off-diagonal elems of c_diff by lambda
        # fix: create the boolean mask on the same device as c_diff, consistent
        # with the identity matrix above; a CPU mask fails for CUDA tensors
        c_diff[~torch.eye(D, dtype=torch.bool, device=device)] *= self.lambda_param
        loss = c_diff.sum()

        return loss
| 2,625 | 33.103896 | 86 | py |
lightly | lightly-master/lightly/loss/dcl_loss.py | from functools import partial
from typing import Callable, Optional
import torch
from torch import Tensor
from torch import distributed as torch_dist
from torch import nn
from lightly.utils import dist
def negative_mises_fisher_weights(
    out0: Tensor, out1: Tensor, sigma: float = 0.5
) -> torch.Tensor:
    """Negative Mises-Fisher weighting function as presented in Decoupled
    Contrastive Learning [0].

    The implementation was inspired by [1].

    - [0] Chun-Hsiao Y. et. al., 2021, Decoupled Contrastive Learning https://arxiv.org/abs/2110.06848
    - [1] https://github.com/raminnakhli/Decoupled-Contrastive-Learning

    Args:
        out0:
            Output projections of the first set of transformed images.
            Shape: (batch_size, embedding_size)
        out1:
            Output projections of the second set of transformed images.
            Shape: (batch_size, embedding_size)
        sigma:
            Similarities are scaled by inverse sigma.

    Returns:
        A tensor with shape (batch_size,) where each entry is the weight for one
        of the input images.
    """
    # row-wise dot product between the two (detached) views, scaled by 1/sigma
    similarity = (out0.detach() * out1.detach()).sum(dim=1) / sigma
    batch_size = out0.shape[0]
    # softmax over the batch dimension; weight = 2 - N * softmax(similarity)
    return 2 - batch_size * nn.functional.softmax(similarity, dim=0)
class DCLLoss(nn.Module):
    """Implementation of the Decoupled Contrastive Learning Loss from
    Decoupled Contrastive Learning [0].

    This code implements Equation 6 in [0], including the sum over all images `i`
    and views `k`. The loss is reduced to a mean loss over the mini-batch.
    The implementation was inspired by [1].

    - [0] Chun-Hsiao Y. et. al., 2021, Decoupled Contrastive Learning https://arxiv.org/abs/2110.06848
    - [1] https://github.com/raminnakhli/Decoupled-Contrastive-Learning

    Attributes:
        temperature:
            Similarities are scaled by inverse temperature.
        weight_fn:
            Weighting function `w` from the paper. Scales the loss between the
            positive views (views from the same image). No weighting is performed
            if weight_fn is None. The function must take the two input tensors
            passed to the forward call as input and return a weight tensor. The
            returned weight tensor must have the same length as the input tensors.
        gather_distributed:
            If True then negatives from all gpus are gathered before the
            loss calculation.

    Examples:
        >>> loss_fn = DCLLoss(temperature=0.07)
        >>>
        >>> # generate two random transforms of images
        >>> t0 = transforms(images)
        >>> t1 = transforms(images)
        >>>
        >>> # embed images using some model, for example SimCLR
        >>> out0 = model(t0)
        >>> out1 = model(t1)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(out0, out1)
        >>>
        >>> # you can also add a custom weighting function
        >>> weight_fn = lambda out0, out1: torch.sum((out0 - out1) ** 2, dim=1)
        >>> loss_fn = DCLLoss(weight_fn=weight_fn)

    """

    def __init__(
        self,
        temperature: float = 0.1,
        weight_fn: Optional[Callable[[Tensor, Tensor], Tensor]] = None,
        gather_distributed: bool = False,
    ):
        super().__init__()
        self.temperature = temperature
        self.weight_fn = weight_fn
        self.gather_distributed = gather_distributed

        # fail fast if distributed gathering was requested without support
        if gather_distributed and not torch_dist.is_available():
            raise ValueError(
                "gather_distributed is True but torch.distributed is not available. "
                "Please set gather_distributed=False or install a torch version with "
                "distributed support."
            )

    def forward(
        self,
        out0: Tensor,
        out1: Tensor,
    ) -> Tensor:
        """Forward pass of the DCL loss.

        Args:
            out0:
                Output projections of the first set of transformed images.
                Shape: (batch_size, embedding_size)
            out1:
                Output projections of the second set of transformed images.
                Shape: (batch_size, embedding_size)

        Returns:
            Mean loss over the mini-batch.
        """
        # normalize the output to length 1
        out0 = nn.functional.normalize(out0, dim=1)
        out1 = nn.functional.normalize(out1, dim=1)

        if self.gather_distributed and dist.world_size() > 1:
            # gather representations from other processes if necessary
            out0_all = torch.cat(dist.gather(out0), 0)
            out1_all = torch.cat(dist.gather(out1), 0)
        else:
            out0_all = out0
            out1_all = out1

        # calculate symmetric loss (view 0 vs view 1 and view 1 vs view 0)
        loss0 = self._loss(out0, out1, out0_all, out1_all)
        loss1 = self._loss(out1, out0, out1_all, out0_all)
        return 0.5 * (loss0 + loss1)

    def _loss(self, out0: Tensor, out1: Tensor, out0_all: Tensor, out1_all: Tensor):
        """Calculates DCL loss for out0 with respect to its positives in out1
        and the negatives in out1, out0_all, and out1_all.

        This code implements Equation 6 in [0], including the sum over all images `i`
        but with `k` fixed at 0.

        Args:
            out0:
                Output projections of the first set of transformed images.
                Shape: (batch_size, embedding_size)
            out1:
                Output projections of the second set of transformed images.
                Shape: (batch_size, embedding_size)
            out0_all:
                Output projections of the first set of transformed images from
                all distributed processes/gpus. Should be equal to out0 in an
                undistributed setting.
                Shape (batch_size * world_size, embedding_size)
            out1_all:
                Output projections of the second set of transformed images from
                all distributed processes/gpus. Should be equal to out1 in an
                undistributed setting.
                Shape (batch_size * world_size, embedding_size)

        Returns:
            Mean loss over the mini-batch.
        """
        # create diagonal mask that only selects similarities between
        # representations of the same images
        batch_size = out0.shape[0]
        if self.gather_distributed and dist.world_size() > 1:
            # mask must be offset by this process' rank in the gathered tensor
            diag_mask = dist.eye_rank(batch_size, device=out0.device)
        else:
            diag_mask = torch.eye(batch_size, device=out0.device, dtype=torch.bool)

        # calculate similarities
        # here n = batch_size and m = batch_size * world_size.
        sim_00 = torch.einsum("nc,mc->nm", out0, out0_all) / self.temperature
        sim_01 = torch.einsum("nc,mc->nm", out0, out1_all) / self.temperature

        positive_loss = -sim_01[diag_mask]
        if self.weight_fn:
            positive_loss = positive_loss * self.weight_fn(out0, out1)

        # remove simliarities between same views of the same image
        sim_00 = sim_00[~diag_mask].view(batch_size, -1)
        # remove similarities between different views of the same images
        # this is the key difference compared to NTXentLoss
        sim_01 = sim_01[~diag_mask].view(batch_size, -1)

        negative_loss_00 = torch.logsumexp(sim_00, dim=1)
        negative_loss_01 = torch.logsumexp(sim_01, dim=1)
        return (positive_loss + negative_loss_00 + negative_loss_01).mean()
class DCLWLoss(DCLLoss):
    """Weighted Decoupled Contrastive Learning Loss (DCLW) from [0].

    This is a thin convenience subclass of :class:`DCLLoss` that plugs in the
    negative Mises-Fisher weighting function from the paper (Equation 6 with
    the von Mises-Fisher weighting). The loss returns the mean over all images
    and views in the mini-batch. Implementation inspired by [1].

    - [0] Chun-Hsiao Y. et. al., 2021, Decoupled Contrastive Learning https://arxiv.org/abs/2110.06848
    - [1] https://github.com/raminnakhli/Decoupled-Contrastive-Learning

    Attributes:
        temperature:
            Similarities are scaled by inverse temperature.
        sigma:
            Similar to temperature but applies the inverse scaling in the
            weighting function.
        gather_distributed:
            If True then negatives from all gpus are gathered before the
            loss calculation.

    Examples:
        >>> loss_fn = DCLWLoss(temperature=0.07)
        >>>
        >>> # generate two random transforms of images
        >>> t0 = transforms(images)
        >>> t1 = transforms(images)
        >>>
        >>> # embed images using some model, for example SimCLR
        >>> out0 = model(t0)
        >>> out1 = model(t1)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(out0, out1)

    """

    def __init__(
        self,
        temperature: float = 0.1,
        sigma: float = 0.5,
        gather_distributed: bool = False,
    ):
        # Bind sigma into the weighting function; everything else is handled
        # by the parent DCLLoss.
        weighting = partial(negative_mises_fisher_weights, sigma=sigma)
        super().__init__(
            temperature=temperature,
            weight_fn=weighting,
            gather_distributed=gather_distributed,
        )
| 9,091 | 36.262295 | 102 | py |
lightly | lightly-master/lightly/loss/dino_loss.py | from typing import List
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
class DINOLoss(nn.Module):
    """
    Implementation of the loss described in 'Emerging Properties in
    Self-Supervised Vision Transformers'. [0]

    This implementation follows the code published by the authors. [1]
    It supports global and local image crops. A linear warmup schedule for the
    teacher temperature is implemented to stabilize training at the beginning.
    Centering is applied to the teacher output to avoid model collapse.

    - [0]: DINO, 2021, https://arxiv.org/abs/2104.14294
    - [1]: https://github.com/facebookresearch/dino

    Attributes:
        output_dim:
            Dimension of the model output.
        warmup_teacher_temp:
            Initial value of the teacher temperature. Should be decreased if the
            training loss does not decrease.
        teacher_temp:
            Final value of the teacher temperature after linear warmup. Values
            above 0.07 result in unstable behavior in most cases. Can be
            slightly increased to improve performance during finetuning.
        warmup_teacher_temp_epochs:
            Number of epochs for the teacher temperature warmup.
        student_temp:
            Temperature of the student.
        center_momentum:
            Momentum term for the center calculation.

    Examples:
        >>> # initialize loss function
        >>> loss_fn = DINOLoss(128)
        >>>
        >>> # generate a view of the images with a random transform
        >>> view = transform(images)
        >>>
        >>> # embed the view with a student and teacher model
        >>> teacher_out = teacher(view)
        >>> student_out = student(view)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn([teacher_out], [student_out], epoch=0)

    """

    def __init__(
        self,
        output_dim: int = 65536,
        warmup_teacher_temp: float = 0.04,
        teacher_temp: float = 0.04,
        warmup_teacher_temp_epochs: int = 30,
        student_temp: float = 0.1,
        center_momentum: float = 0.9,
    ):
        super().__init__()
        self.warmup_teacher_temp_epochs = warmup_teacher_temp_epochs
        self.teacher_temp = teacher_temp
        self.student_temp = student_temp
        self.center_momentum = center_momentum

        # running center of the teacher outputs, stored as a buffer so it
        # moves with the module between devices and is saved in checkpoints
        self.register_buffer("center", torch.zeros(1, 1, output_dim))

        # we apply a warm up for the teacher temperature because
        # a too high temperature makes the training instable at the beginning
        self.teacher_temp_schedule = torch.linspace(
            start=warmup_teacher_temp,
            end=teacher_temp,
            steps=warmup_teacher_temp_epochs,
        )

    def forward(
        self,
        teacher_out: List[torch.Tensor],
        student_out: List[torch.Tensor],
        epoch: int,
    ) -> torch.Tensor:
        """Cross-entropy between softmax outputs of the teacher and student
        networks.

        Args:
            teacher_out:
                List of view feature tensors from the teacher model. Each
                tensor is assumed to contain features from one view of the batch
                and have length batch_size.
            student_out:
                List of view feature tensors from the student model. Each tensor
                is assumed to contain features from one view of the batch and
                have length batch_size.
            epoch:
                The current training epoch.

        Returns:
            The average cross-entropy loss.
        """
        # get teacher temperature (linear warmup, then constant)
        if epoch < self.warmup_teacher_temp_epochs:
            teacher_temp = self.teacher_temp_schedule[epoch]
        else:
            teacher_temp = self.teacher_temp

        # teacher targets: centered and sharpened softmax, no gradient path
        teacher_out = torch.stack(teacher_out)
        t_out = F.softmax((teacher_out - self.center) / teacher_temp, dim=-1)

        student_out = torch.stack(student_out)
        s_out = F.log_softmax(student_out / self.student_temp, dim=-1)

        # calculate feature similarities where:
        # b = batch_size, t = n_views_teacher, s = n_views_student, d = output_dim
        # the diagonal is ignored as it contains features from the same views
        loss = -torch.einsum("tbd,sbd->ts", t_out, s_out)
        loss.fill_diagonal_(0)

        # number of loss terms, ignoring the diagonal
        n_terms = loss.numel() - loss.diagonal().numel()
        batch_size = teacher_out.shape[1]

        loss = loss.sum() / (n_terms * batch_size)
        self.update_center(teacher_out)
        return loss

    @torch.no_grad()
    def update_center(self, teacher_out: torch.Tensor) -> None:
        """Moving average update of the center used for the teacher output.

        Args:
            teacher_out:
                Stacked output from the teacher model.
        """
        # mean over views and batch, keeping the (1, 1, output_dim) shape
        batch_center = torch.mean(teacher_out, dim=(0, 1), keepdim=True)
        if dist.is_available() and dist.is_initialized():
            dist.all_reduce(batch_center)
            batch_center = batch_center / dist.get_world_size()

        # ema update
        self.center = self.center * self.center_momentum + batch_center * (
            1 - self.center_momentum
        )
| 5,297 | 34.557047 | 82 | py |
lightly | lightly-master/lightly/loss/hypersphere_loss.py | """
FIXME: hypersphere is perhaps bad naming as I am not sure it is the essence;
alignment-and-uniformity loss perhaps? Does not sound as nice.
"""
import torch
import torch.nn.functional as F
class HypersphereLoss(torch.nn.Module):
    """
    Implementation of the loss described in 'Understanding Contrastive Representation Learning through
    Alignment and Uniformity on the Hypersphere.' [0]

    The loss is the sum of an alignment term (positive pairs should be close)
    and a lambda-weighted uniformity term (embeddings should spread out on the
    unit hypersphere).

    [0] Tongzhou Wang. et.al, 2020, ... https://arxiv.org/abs/2005.10242

    Note:
        In order for this loss to function as advertized, an l1-normalization to the hypersphere is required.
        This loss function applies this l1-normalization internally in the loss-layer.
        However, it is recommended that the same normalization is also applied in your architecture,
        considering that this l1-loss is also intended to be applied during inference.
        Perhaps there may be merit in leaving it out of the inferrence pathway, but this use has not been tested.

        Moreover it is recommended that the layers preceeding this loss function are either a linear layer without activation,
        a batch-normalization layer, or both. The directly upstream architecture can have a large influence
        on the ability of this loss to achieve its stated aim of promoting uniformity on the hypersphere;
        and if by contrast the last layer going into the embedding is a RELU or similar nonlinearity,
        we may see that we will never get very close to achieving the goal of uniformity on the hypersphere,
        but will confine ourselves to the subspace of positive activations.

        Similar architectural considerations are relevant to most contrastive loss functions,
        but we call it out here explicitly.

    Examples:
        >>> # initialize loss function
        >>> loss_fn = HypersphereLoss()
        >>>
        >>> # generate two random transforms of images
        >>> t0 = transforms(images)
        >>> t1 = transforms(images)
        >>>
        >>> # feed through SimSiam model
        >>> out0, out1 = model(t0, t1)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(out0, out1)
    """

    def __init__(self, t=1.0, lam=1.0, alpha=2.0):
        """Parameters as described in [0]

        Args:
            t : float
                Temperature parameter;
                proportional to the inverse variance of the Gaussians used to measure uniformity
            lam : float:
                Weight balancing the alignment and uniformity loss terms
            alpha : float
                Power applied to the alignment term of the loss. At its default value of 2,
                distances between positive samples are penalized in an l-2 sense.
        """
        super(HypersphereLoss, self).__init__()
        self.t = t
        self.lam = lam
        self.alpha = alpha

    def _alignment(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        # mean of pairwise positive distances raised to the power alpha
        return (x - y).norm(dim=1).pow(self.alpha).mean()

    def _uniformity(self, x: torch.Tensor) -> torch.Tensor:
        # log of the mean Gaussian potential over all pairs within the batch
        squared_distances = torch.pdist(x, p=2).pow(2)
        return squared_distances.mul(-self.t).exp().mean().log()

    def forward(self, z_a: torch.Tensor, z_b: torch.Tensor) -> torch.Tensor:
        """
        Args:
            z_a (torch.Tensor, [b, d], float)
            z_b (torch.Tensor, [b, d], float)

        Returns:
            Loss (torch.Tensor, [], float)
        """
        # project both batches onto the unit hypersphere before scoring
        x = F.normalize(z_a)
        y = F.normalize(z_b)
        uniformity = (self._uniformity(x) + self._uniformity(y)) / 2
        return self._alignment(x, y) + self.lam * uniformity
| 3,477 | 37.644444 | 126 | py |
lightly | lightly-master/lightly/loss/memory_bank.py | """ Memory Bank Wrapper """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import functools
import torch
class MemoryBankModule(torch.nn.Module):
    """Memory bank implementation

    This is a parent class to all loss functions implemented by the lightly
    Python package. This way, any loss can be used with a memory bank if
    desired.

    Attributes:
        size:
            Number of keys the memory bank can store. If set to 0,
            memory bank is not used.

    Examples:
        >>> class MyLossFunction(MemoryBankModule):
        >>>
        >>>     def __init__(self, memory_bank_size: int = 2 ** 16):
        >>>         super(MyLossFunction, self).__init__(memory_bank_size)
        >>>
        >>>     def forward(self, output: torch.Tensor,
        >>>                 labels: torch.Tensor = None):
        >>>
        >>>         output, negatives = super(
        >>>             MyLossFunction, self).forward(output)
        >>>
        >>>         if negatives is not None:
        >>>             # evaluate loss with negative samples
        >>>         else:
        >>>             # evaluate loss without negative samples

    """

    def __init__(self, size: int = 2**16):
        super(MemoryBankModule, self).__init__()

        if size < 0:
            msg = f"Illegal memory bank size {size}, must be non-negative."
            raise ValueError(msg)

        self.size = size
        # lazily initialized on the first forward pass (see _init_memory_bank);
        # non-persistent so the bank is not written into checkpoints
        self.register_buffer(
            "bank", tensor=torch.empty(0, dtype=torch.float), persistent=False
        )
        # position at which the next batch will be written into the bank
        self.register_buffer(
            "bank_ptr", tensor=torch.empty(0, dtype=torch.long), persistent=False
        )

    @torch.no_grad()
    def _init_memory_bank(self, dim: int):
        """Initialize the memory bank if it's empty

        Args:
            dim:
                The dimension of the which are stored in the bank.
        """
        # create memory bank
        # we could use register buffers like in the moco repo
        # https://github.com/facebookresearch/moco but we don't
        # want to pollute our checkpoints
        # bank is stored transposed with shape (dim, size), columns unit-norm
        self.bank = torch.randn(dim, self.size).type_as(self.bank)
        self.bank = torch.nn.functional.normalize(self.bank, dim=0)
        self.bank_ptr = torch.zeros(1).type_as(self.bank_ptr)

    @torch.no_grad()
    def _dequeue_and_enqueue(self, batch: torch.Tensor):
        """Dequeue the oldest batch and add the latest one

        Args:
            batch:
                The latest batch of keys to add to the memory bank.
        """
        batch_size = batch.shape[0]
        ptr = int(self.bank_ptr)

        if ptr + batch_size >= self.size:
            # batch does not fit before the end of the bank: write the part
            # that fits, drop the rest, and wrap the pointer back to 0
            self.bank[:, ptr:] = batch[: self.size - ptr].T.detach()
            self.bank_ptr[0] = 0
        else:
            self.bank[:, ptr : ptr + batch_size] = batch.T.detach()
            self.bank_ptr[0] = ptr + batch_size

    def forward(
        self, output: torch.Tensor, labels: torch.Tensor = None, update: bool = False
    ):
        """Query memory bank for additional negative samples

        Args:
            output:
                The output of the model.
            labels:
                Should always be None, will be ignored.
            update:
                If True, the output batch is written into the bank after the
                current bank contents have been read.

        Returns:
            The output if the memory bank is of size 0, otherwise the output
            and the entries from the memory bank.
        """

        # no memory bank, return the output
        if self.size == 0:
            return output, None

        _, dim = output.shape

        # initialize the memory bank if it is not already done
        if self.bank.nelement() == 0:
            self._init_memory_bank(dim)

        # query and update memory bank
        bank = self.bank.clone().detach()

        # only update memory bank if we later do backward pass (gradient)
        if update:
            self._dequeue_and_enqueue(output)

        return output, bank
| 3,923 | 29.65625 | 85 | py |
lightly | lightly-master/lightly/loss/msn_loss.py | import math
import warnings
from typing import Union
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
def prototype_probabilities(
    queries: Tensor,
    prototypes: Tensor,
    temperature: float,
) -> Tensor:
    """Returns probability for each query to belong to each prototype.

    Args:
        queries:
            Tensor with shape (batch_size, dim)
        prototypes:
            Tensor with shape (num_prototypes, dim)
        temperature:
            Inverse scaling factor for the similarity.

    Returns:
        Probability tensor with shape (batch_size, num_prototypes) which sums to 1 along
        the num_prototypes dimension.
    """
    # similarity logits between every query and every prototype
    logits = queries @ prototypes.T
    # temperature-scaled softmax over the prototype dimension
    return F.softmax(logits / temperature, dim=1)
def sharpen(probabilities: Tensor, temperature: float) -> Tensor:
    """Sharpens the probabilities with the given temperature.

    Args:
        probabilities:
            Tensor with shape (batch_size, dim)
        temperature:
            Temperature in (0, 1]. Lower temperature results in stronger sharpening (
            output probabilities are less uniform).

    Returns:
        Probabilities tensor with shape (batch_size, dim).
    """
    # raise to the power 1/T and renormalize each row to sum to 1
    sharpened = probabilities.pow(1.0 / temperature)
    return sharpened / sharpened.sum(dim=1, keepdim=True)
@torch.no_grad()
def sinkhorn(
    probabilities: Tensor,
    iterations: int = 3,
    gather_distributed: bool = False,
) -> Tensor:
    """Runs sinkhorn normalization on the probabilities as described in [0].

    Code inspired by [1].

    - [0]: Masked Siamese Networks, 2022, https://arxiv.org/abs/2204.07141
    - [1]: https://github.com/facebookresearch/msn

    Args:
        probabilities:
            Probabilities tensor with shape (batch_size, num_prototypes).
        iterations:
            Number of iterations of the sinkhorn algorithms. Set to 0 to disable.
        gather_distributed:
            If True then features from all gpus are gathered during normalization.

    Returns:
        A normalized probabilities tensor.
    """
    if iterations <= 0:
        return probabilities

    world_size = 1
    if gather_distributed and dist.is_initialized():
        world_size = dist.get_world_size()

    num_targets, num_prototypes = probabilities.shape
    # work on the transpose with shape (num_prototypes, num_targets)
    probabilities = probabilities.T
    # normalize by the total mass (summed across all processes if distributed)
    sum_probabilities = torch.sum(probabilities)
    if world_size > 1:
        dist.all_reduce(sum_probabilities)
    probabilities = probabilities / sum_probabilities

    # alternate row (prototype) and column (target) normalization
    for _ in range(iterations):
        # normalize rows
        row_sum = torch.sum(probabilities, dim=1, keepdim=True)
        if world_size > 1:
            dist.all_reduce(row_sum)
        probabilities /= row_sum
        probabilities /= num_prototypes

        # normalize columns
        probabilities /= torch.sum(probabilities, dim=0, keepdim=True)
        probabilities /= num_targets

    # undo the final 1/num_targets scaling so each target's distribution over
    # prototypes sums to 1 again
    probabilities *= num_targets
    return probabilities.T
class MSNLoss(nn.Module):
    """Implementation of the loss function from MSN [0].

    Code inspired by [1].

    - [0]: Masked Siamese Networks, 2022, https://arxiv.org/abs/2204.07141
    - [1]: https://github.com/facebookresearch/msn

    Attributes:
        temperature:
            Similarities between anchors and targets are scaled by the inverse of
            the temperature. Must be in (0, inf).
        sinkhorn_iterations:
            Number of sinkhorn normalization iterations on the targets.
        regularization_weight:
            Weight factor lambda by which the regularization loss is scaled. Set to 0
            to disable regularization.
        me_max_weight:
            Deprecated, use `regularization_weight` instead. Takes precendence over
            `regularization_weight` if not None. Weight factor lambda by which the mean
            entropy maximization regularization loss is scaled. Set to 0 to disable
            mean entropy maximization reguliarization.
        gather_distributed:
            If True, then target probabilities are gathered from all GPUs.

    Examples:
        >>> # initialize loss function
        >>> loss_fn = MSNLoss()
        >>>
        >>> # generate anchors and targets of images
        >>> anchors = transforms(images)
        >>> targets = transforms(images)
        >>>
        >>> # feed through MSN model
        >>> anchors_out = model(anchors)
        >>> targets_out = model.target(targets)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(anchors_out, targets_out, prototypes=model.prototypes)

    """

    def __init__(
        self,
        temperature: float = 0.1,
        sinkhorn_iterations: int = 3,
        regularization_weight: float = 1.0,
        me_max_weight: Union[float, None] = None,
        gather_distributed: bool = False,
    ):
        super().__init__()
        # validate hyperparameters early so misconfiguration fails at construction
        if temperature <= 0:
            raise ValueError(f"temperature must be in (0, inf) but is {temperature}.")
        if sinkhorn_iterations < 0:
            raise ValueError(
                f"sinkhorn_iterations must be >= 0 but is {sinkhorn_iterations}."
            )
        if gather_distributed and not dist.is_available():
            raise ValueError(
                "gather_distributed is True but torch.distributed is not available. "
                "Please set gather_distributed=False or install a torch version with "
                "distributed support."
            )
        self.temperature = temperature
        self.sinkhorn_iterations = sinkhorn_iterations
        self.regularization_weight = regularization_weight
        # set regularization_weight to me_max_weight for backwards compatibility
        if me_max_weight is not None:
            warnings.warn(
                DeprecationWarning(
                    "me_max_weight is deprecated in favor of regularization_weight and "
                    "will be removed in the future."
                )
            )
            self.regularization_weight = me_max_weight
        self.gather_distributed = gather_distributed

    def forward(
        self,
        anchors: Tensor,
        targets: Tensor,
        prototypes: Tensor,
        target_sharpen_temperature: float = 0.25,
    ) -> Tensor:
        """Computes the MSN loss for a set of anchors, targets and prototypes.

        Args:
            anchors:
                Tensor with shape (batch_size * anchor_views, dim).
            targets:
                Tensor with shape (batch_size, dim).
            prototypes:
                Tensor with shape (num_prototypes, dim).
            target_sharpen_temperature:
                Temperature used to sharpen the target probabilities.

        Returns:
            Mean loss over all anchors.
        """
        num_views = anchors.shape[0] // targets.shape[0]
        # project all embeddings onto the unit sphere before comparing
        anchors = F.normalize(anchors, dim=1)
        targets = F.normalize(targets, dim=1)
        prototypes = F.normalize(prototypes, dim=1)

        # anchor predictions
        anchor_probs = prototype_probabilities(
            anchors, prototypes, temperature=self.temperature
        )

        # target predictions serve as labels, so no gradients are tracked
        with torch.no_grad():
            target_probs = prototype_probabilities(
                targets, prototypes, temperature=self.temperature
            )
            target_probs = sharpen(target_probs, temperature=target_sharpen_temperature)
            if self.sinkhorn_iterations > 0:
                target_probs = sinkhorn(
                    probabilities=target_probs,
                    iterations=self.sinkhorn_iterations,
                    gather_distributed=self.gather_distributed,
                )
            # repeat targets so each anchor view has a matching target row
            target_probs = target_probs.repeat((num_views, 1))

        # cross entropy loss; note log(p ** (-t)) == -t * log(p)
        loss = torch.mean(torch.sum(torch.log(anchor_probs ** (-target_probs)), dim=1))

        # regularization loss
        if self.regularization_weight > 0:
            mean_anchor_probs = torch.mean(anchor_probs, dim=0)
            reg_loss = self.regularization_loss(mean_anchor_probs=mean_anchor_probs)
            loss += self.regularization_weight * reg_loss

        return loss

    def regularization_loss(self, mean_anchor_probs: Tensor) -> Tensor:
        """Calculates mean entropy regularization loss."""
        # negative entropy of the mean prediction; log(p ** (-p)) == -p * log(p)
        loss = -torch.sum(torch.log(mean_anchor_probs ** (-mean_anchor_probs)))
        loss += math.log(float(len(mean_anchor_probs)))
        return loss
| 8,522 | 33.228916 | 88 | py |
lightly | lightly-master/lightly/loss/negative_cosine_similarity.py | """ Negative Cosine Similarity Loss Function """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import torch
from torch.nn.functional import cosine_similarity
class NegativeCosineSimilarity(torch.nn.Module):
    """Implementation of the Negative Cosine Simililarity used in the SimSiam[0] paper.

    The forward pass returns minus the mean cosine similarity between the two
    input batches, so minimizing this loss pulls matching representations
    together.

    [0] SimSiam, 2020, https://arxiv.org/abs/2011.10566

    Examples:
        >>> # initialize loss function
        >>> loss_fn = NegativeCosineSimilarity()
        >>>
        >>> # generate two representation tensors
        >>> # with batch size 10 and dimension 128
        >>> x0 = torch.randn(10, 128)
        >>> x1 = torch.randn(10, 128)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(x0, x1)
    """

    def __init__(self, dim: int = 1, eps: float = 1e-8) -> None:
        """Same parameters as in torch.nn.CosineSimilarity

        Args:
            dim (int, optional):
                Dimension where cosine similarity is computed. Default: 1
            eps (float, optional):
                Small value to avoid division by zero. Default: 1e-8
        """
        super().__init__()
        self.dim = dim
        self.eps = eps

    def forward(self, x0: torch.Tensor, x1: torch.Tensor) -> torch.Tensor:
        # per-sample cosine similarity, averaged over the batch and negated
        similarity = cosine_similarity(x0, x1, self.dim, self.eps)
        return -similarity.mean()
| 1,343 | 29.545455 | 87 | py |
lightly | lightly-master/lightly/loss/ntx_ent_loss.py | """ Contrastive Loss Functions """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import torch
from torch import distributed as torch_dist
from torch import nn
from lightly.loss.memory_bank import MemoryBankModule
from lightly.utils import dist
class NTXentLoss(MemoryBankModule):
    """Implementation of the Contrastive Cross Entropy Loss.

    This implementation follows the SimCLR[0] paper. If you enable the memory
    bank by setting the `memory_bank_size` value > 0 the loss behaves like
    the one described in the MoCo[1] paper.

    - [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709
    - [1] MoCo, 2020, https://arxiv.org/abs/1911.05722

    Attributes:
        temperature:
            Scale logits by the inverse of the temperature.
        memory_bank_size:
            Number of negative samples to store in the memory bank.
            Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536.
        gather_distributed:
            If True then negatives from all gpus are gathered before the
            loss calculation. This flag has no effect if memory_bank_size > 0.

    Raises:
        ValueError: If abs(temperature) < 1e-8 to prevent divide by zero.

    Examples:
        >>> # initialize loss function without memory bank
        >>> loss_fn = NTXentLoss(memory_bank_size=0)
        >>>
        >>> # generate two random transforms of images
        >>> t0 = transforms(images)
        >>> t1 = transforms(images)
        >>>
        >>> # feed through SimCLR or MoCo model
        >>> batch = torch.cat((t0, t1), dim=0)
        >>> output = model(batch)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(output)
    """

    def __init__(
        self,
        temperature: float = 0.5,
        memory_bank_size: int = 0,
        gather_distributed: bool = False,
    ):
        # MemoryBankModule keeps `memory_bank_size` past embeddings as negatives.
        super(NTXentLoss, self).__init__(size=memory_bank_size)
        self.temperature = temperature
        self.gather_distributed = gather_distributed
        self.cross_entropy = nn.CrossEntropyLoss(reduction="mean")
        self.eps = 1e-8

        # Temperature divides the logits; values near zero would blow them up.
        if abs(self.temperature) < self.eps:
            raise ValueError(
                "Illegal temperature: abs({}) < 1e-8".format(self.temperature)
            )
        if gather_distributed and not torch_dist.is_available():
            raise ValueError(
                "gather_distributed is True but torch.distributed is not available. "
                "Please set gather_distributed=False or install a torch version with "
                "distributed support."
            )

    def forward(self, out0: torch.Tensor, out1: torch.Tensor) -> torch.Tensor:
        """Forward pass through Contrastive Cross-Entropy Loss.

        If used with a memory bank, the samples from the memory bank are used
        as negative examples. Otherwise, within-batch samples are used as
        negative samples.

        Args:
            out0:
                Output projections of the first set of transformed images.
                Shape: (batch_size, embedding_size)
            out1:
                Output projections of the second set of transformed images.
                Shape: (batch_size, embedding_size)

        Returns:
            Contrastive Cross Entropy Loss value.
        """
        device = out0.device
        batch_size, _ = out0.shape

        # normalize the output to length 1 so dot products below are cosine
        # similarities
        out0 = nn.functional.normalize(out0, dim=1)
        out1 = nn.functional.normalize(out1, dim=1)

        # ask memory bank for negative samples and extend it with out1 if
        # out1 requires a gradient, otherwise keep the same vectors in the
        # memory bank (this allows for keeping the memory bank constant e.g.
        # for evaluating the loss on the test set)
        # out1: shape: (batch_size, embedding_size)
        # negatives: shape: (embedding_size, memory_bank_size)
        out1, negatives = super(NTXentLoss, self).forward(
            out1, update=out0.requires_grad
        )

        # We use the cosine similarity, which is a dot product (einsum) here,
        # as all vectors are already normalized to unit length.
        # Notation in einsum: n = batch_size, c = embedding_size and k = memory_bank_size.

        if negatives is not None:
            # use negatives from memory bank (MoCo-style)
            negatives = negatives.to(device)

            # sim_pos is of shape (batch_size, 1) and sim_pos[i] denotes the similarity
            # of the i-th sample in the batch to its positive pair
            sim_pos = torch.einsum("nc,nc->n", out0, out1).unsqueeze(-1)

            # sim_neg is of shape (batch_size, memory_bank_size) and sim_neg[i,j] denotes the similarity
            # of the i-th sample to the j-th negative sample
            sim_neg = torch.einsum("nc,ck->nk", out0, negatives)

            # set the labels to the first "class", i.e. sim_pos,
            # so that it is maximized in relation to sim_neg
            logits = torch.cat([sim_pos, sim_neg], dim=1) / self.temperature
            labels = torch.zeros(logits.shape[0], device=device, dtype=torch.long)
        else:
            # use other samples from batch as negatives (SimCLR-style)
            # and create diagonal mask that only selects similarities between
            # views of the same image
            if self.gather_distributed and dist.world_size() > 1:
                # gather hidden representations from other processes
                out0_large = torch.cat(dist.gather(out0), 0)
                out1_large = torch.cat(dist.gather(out1), 0)
                # eye_rank marks this rank's own samples inside the gathered batch
                diag_mask = dist.eye_rank(batch_size, device=out0.device)
            else:
                # single process
                out0_large = out0
                out1_large = out1
                diag_mask = torch.eye(batch_size, device=out0.device, dtype=torch.bool)

            # calculate similarities
            # here n = batch_size and m = batch_size * world_size
            # the resulting vectors have shape (n, m)
            logits_00 = torch.einsum("nc,mc->nm", out0, out0_large) / self.temperature
            logits_01 = torch.einsum("nc,mc->nm", out0, out1_large) / self.temperature
            logits_10 = torch.einsum("nc,mc->nm", out1, out0_large) / self.temperature
            logits_11 = torch.einsum("nc,mc->nm", out1, out1_large) / self.temperature

            # remove similarities between same views of the same image
            # (self-similarity is trivially 1 and must not act as a negative)
            logits_00 = logits_00[~diag_mask].view(batch_size, -1)
            logits_11 = logits_11[~diag_mask].view(batch_size, -1)

            # concatenate logits
            # the logits tensor in the end has shape (2*n, 2*m-1)
            logits_0100 = torch.cat([logits_01, logits_00], dim=1)
            logits_1011 = torch.cat([logits_10, logits_11], dim=1)
            logits = torch.cat([logits_0100, logits_1011], dim=0)

            # create labels: the positive pair of row i sits at column i of the
            # cross-view logits (offset by this rank's position when distributed)
            labels = torch.arange(batch_size, device=device, dtype=torch.long)
            if self.gather_distributed:
                labels = labels + dist.rank() * batch_size
            labels = labels.repeat(2)

        loss = self.cross_entropy(logits, labels)

        return loss
| 7,186 | 39.376404 | 104 | py |
lightly | lightly-master/lightly/loss/pmsn_loss.py | from typing import Callable
import torch
import torch.nn.functional as F
from torch import Tensor
from lightly.loss.msn_loss import MSNLoss
class PMSNLoss(MSNLoss):
    """Implementation of the loss function from PMSN [0] using a power law target
    distribution.

    - [0]: Prior Matching for Siamese Networks, 2022, https://arxiv.org/abs/2210.07277

    Attributes:
        temperature:
            Similarities between anchors and targets are scaled by the inverse of
            the temperature. Must be in (0, inf).
        sinkhorn_iterations:
            Number of sinkhorn normalization iterations on the targets.
        regularization_weight:
            Weight factor lambda by which the regularization loss is scaled. Set to 0
            to disable regularization.
        power_law_exponent:
            Exponent for power law distribution. Entry k of the distribution is
            proportional to (1 / k) ^ power_law_exponent, with k ranging from 1 to dim + 1.
        gather_distributed:
            If True, then target probabilities are gathered from all GPUs.

    Examples:
        >>> # initialize loss function
        >>> loss_fn = PMSNLoss()
        >>>
        >>> # generate anchors and targets of images
        >>> anchors = transforms(images)
        >>> targets = transforms(images)
        >>>
        >>> # feed through PMSN model
        >>> anchors_out = model(anchors)
        >>> targets_out = model.target(targets)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(anchors_out, targets_out, prototypes=model.prototypes)
    """

    def __init__(
        self,
        temperature: float = 0.1,
        sinkhorn_iterations: int = 3,
        regularization_weight: float = 1,
        power_law_exponent: float = 0.25,
        gather_distributed: bool = False,
    ):
        super().__init__(
            temperature=temperature,
            sinkhorn_iterations=sinkhorn_iterations,
            regularization_weight=regularization_weight,
            gather_distributed=gather_distributed,
        )
        self.power_law_exponent = power_law_exponent

    def regularization_loss(self, mean_anchor_probs: Tensor) -> Tensor:
        """Calculates regularization loss with a power law target distribution.

        Returns KL(power_dist || mean_anchor_probs) computed with F.kl_div.
        """
        power_dist = _power_law_distribution(
            size=mean_anchor_probs.shape[0],
            exponent=self.power_law_exponent,
            device=mean_anchor_probs.device,
        )
        # Fix: F.kl_div expects `input` in log-space (see PyTorch docs). Passing
        # the raw probabilities computed target * (log(target) - p) instead of
        # the KL divergence target * (log(target) - log(p)).
        loss = F.kl_div(
            input=mean_anchor_probs.log(), target=power_dist, reduction="sum"
        )
        return loss
class PMSNCustomLoss(MSNLoss):
    """Implementation of the loss function from PMSN [0] with a custom target
    distribution.

    - [0]: Prior Matching for Siamese Networks, 2022, https://arxiv.org/abs/2210.07277

    Attributes:
        target_distribution:
            A function that takes the mean anchor probabilities tensor with shape (dim,)
            as input and returns a target probability distribution tensor with the same
            shape. The returned distribution should sum up to one. The final
            regularization loss is calculated as KL(mean_anchor_probs, target_dist)
            where KL is the Kullback-Leibler divergence.
        temperature:
            Similarities between anchors and targets are scaled by the inverse of
            the temperature. Must be in (0, inf).
        sinkhorn_iterations:
            Number of sinkhorn normalization iterations on the targets.
        regularization_weight:
            Weight factor lambda by which the regularization loss is scaled. Set to 0
            to disable regularization.
        gather_distributed:
            If True, then target probabilities are gathered from all GPUs.

    Examples:
        >>> # define custom target distribution
        >>> def my_uniform_distribution(mean_anchor_probabilities: Tensor) -> Tensor:
        >>>     dim = mean_anchor_probabilities.shape[0]
        >>>     return mean_anchor_probabilities.new_ones(dim) / dim
        >>>
        >>> # initialize loss function
        >>> loss_fn = PMSNCustomLoss(target_distribution=my_uniform_distribution)
        >>>
        >>> # generate anchors and targets of images
        >>> anchors = transforms(images)
        >>> targets = transforms(images)
        >>>
        >>> # feed through PMSN model
        >>> anchors_out = model(anchors)
        >>> targets_out = model.target(targets)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(anchors_out, targets_out, prototypes=model.prototypes)
    """

    def __init__(
        self,
        target_distribution: Callable[[Tensor], Tensor],
        temperature: float = 0.1,
        sinkhorn_iterations: int = 3,
        regularization_weight: float = 1,
        gather_distributed: bool = False,
    ):
        super().__init__(
            temperature=temperature,
            sinkhorn_iterations=sinkhorn_iterations,
            regularization_weight=regularization_weight,
            gather_distributed=gather_distributed,
        )
        self.target_distribution = target_distribution

    def regularization_loss(self, mean_anchor_probs: Tensor) -> Tensor:
        """Calculates regularization loss with a custom target distribution.

        Returns KL(target_dist || mean_anchor_probs) computed with F.kl_div.
        """
        target_dist = self.target_distribution(mean_anchor_probs).to(
            mean_anchor_probs.device
        )
        # Fix: F.kl_div expects `input` in log-space (see PyTorch docs). Passing
        # the raw probabilities computed target * (log(target) - p) instead of
        # the KL divergence target * (log(target) - log(p)).
        loss = F.kl_div(
            input=mean_anchor_probs.log(), target=target_dist, reduction="sum"
        )
        return loss
def _power_law_distribution(size: int, exponent: float, device: torch.device) -> Tensor:
"""Returns a power law distribution summing up to 1."""
k = torch.arange(1, size + 1, device=device)
power_dist = k ** (-exponent)
power_dist = power_dist / power_dist.sum()
return power_dist
| 5,790 | 37.098684 | 91 | py |
lightly | lightly-master/lightly/loss/swav_loss.py | from typing import List
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
@torch.no_grad()
def sinkhorn(
    out: torch.Tensor,
    iterations: int = 3,
    epsilon: float = 0.05,
    gather_distributed: bool = False,
) -> torch.Tensor:
    """Distributed sinkhorn algorithm.

    As outlined in [0] and implemented in [1].

    [0]: SwaV, 2020, https://arxiv.org/abs/2006.09882
    [1]: https://github.com/facebookresearch/swav/

    Args:
        out:
            Similarity of the features and the SwaV prototypes.
        iterations:
            Number of sinkhorn iterations.
        epsilon:
            Temperature parameter.
        gather_distributed:
            If True then features from all gpus are gathered to calculate the
            soft codes Q.

    Returns:
        Soft codes Q assigning each feature to a prototype.
    """
    world_size = 1
    if gather_distributed and dist.is_initialized():
        world_size = dist.get_world_size()

    # Exponentiate the scaled similarities; after transposing, rows index
    # prototypes and columns index samples. Normalize to a total mass of 1.
    Q = torch.exp(out / epsilon).t()
    total = torch.sum(Q)
    if world_size > 1:
        dist.all_reduce(total)
    Q /= total

    # Effective number of samples across all processes.
    num_samples = Q.shape[1] * world_size
    for _ in range(iterations):
        # Normalize rows (prototype marginals).
        row_sums = torch.sum(Q, dim=1, keepdim=True)
        if world_size > 1:
            dist.all_reduce(row_sums)
        Q /= row_sums

        # Normalize columns (sample marginals).
        Q /= torch.sum(Q, dim=0, keepdim=True)
        Q /= num_samples

    # Undo the per-sample scaling so every column sums to 1.
    Q *= num_samples
    return Q.t()
class SwaVLoss(nn.Module):
    """Implementation of the SwaV loss.

    Attributes:
        temperature:
            Temperature parameter used for cross entropy calculations.
        sinkhorn_iterations:
            Number of iterations of the sinkhorn algorithm.
        sinkhorn_epsilon:
            Temperature parameter used in the sinkhorn algorithm.
        sinkhorn_gather_distributed:
            If True then features from all gpus are gathered to calculate the
            soft codes in the sinkhorn algorithm.
    """

    def __init__(
        self,
        temperature: float = 0.1,
        sinkhorn_iterations: int = 3,
        sinkhorn_epsilon: float = 0.05,
        sinkhorn_gather_distributed: bool = False,
    ):
        super(SwaVLoss, self).__init__()
        # Fail early: distributed gathering cannot work without torch.distributed.
        if sinkhorn_gather_distributed and not dist.is_available():
            raise ValueError(
                "sinkhorn_gather_distributed is True but torch.distributed is not "
                "available. Please set gather_distributed=False or install a torch "
                "version with distributed support."
            )
        self.temperature = temperature
        self.sinkhorn_iterations = sinkhorn_iterations
        self.sinkhorn_epsilon = sinkhorn_epsilon
        self.sinkhorn_gather_distributed = sinkhorn_gather_distributed

    def subloss(self, z: torch.Tensor, q: torch.Tensor) -> torch.Tensor:
        """Calculates the cross entropy for the SwaV prediction problem.

        Args:
            z:
                Similarity of the features and the SwaV prototypes.
            q:
                Codes obtained from Sinkhorn iterations.

        Returns:
            Cross entropy between predictions z and codes q.
        """
        # Cross entropy between the fixed codes q and the temperature-scaled
        # log-probabilities of z, averaged over the batch.
        return -torch.mean(
            torch.sum(q * F.log_softmax(z / self.temperature, dim=1), dim=1)
        )

    def forward(
        self,
        high_resolution_outputs: List[torch.Tensor],
        low_resolution_outputs: List[torch.Tensor],
        queue_outputs: List[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Computes the SwaV loss for a set of high and low resolution outputs.

        Args:
            high_resolution_outputs:
                List of similarities of features and SwaV prototypes for the
                high resolution crops.
            low_resolution_outputs:
                List of similarities of features and SwaV prototypes for the
                low resolution crops.
            queue_outputs:
                List of similarities of features and SwaV prototypes for the
                queue of high resolution crops from previous batches. May be
                None, in which case only the current batch is used.

        Returns:
            Swapping assignments between views loss (SwaV) as described in [0].

        [0]: SwaV, 2020, https://arxiv.org/abs/2006.09882
        """
        n_crops = len(high_resolution_outputs) + len(low_resolution_outputs)

        # multi-crop iterations
        loss = 0.0
        for i in range(len(high_resolution_outputs)):
            # compute codes of i-th high resolution crop
            # (codes are treated as constants: no gradient flows through them)
            with torch.no_grad():
                outputs = high_resolution_outputs[i].detach()

                # Append queue outputs
                if queue_outputs is not None:
                    outputs = torch.cat((outputs, queue_outputs[i].detach()))

                # Compute the codes
                q = sinkhorn(
                    outputs,
                    iterations=self.sinkhorn_iterations,
                    epsilon=self.sinkhorn_epsilon,
                    gather_distributed=self.sinkhorn_gather_distributed,
                )

                # Drop queue similarities: only codes of the current batch are
                # used as prediction targets
                if queue_outputs is not None:
                    q = q[: len(high_resolution_outputs[i])]

            # compute subloss for each pair of crops: every other view must
            # predict the codes of crop i
            subloss = 0.0
            for v in range(len(high_resolution_outputs)):
                if v != i:
                    subloss += self.subloss(high_resolution_outputs[v], q)

            for v in range(len(low_resolution_outputs)):
                subloss += self.subloss(low_resolution_outputs[v], q)

            # average over the (n_crops - 1) predicting views
            loss += subloss / (n_crops - 1)

        # average over the high resolution crops that provided codes
        return loss / len(high_resolution_outputs)
| 5,741 | 30.9 | 84 | py |
lightly | lightly-master/lightly/loss/sym_neg_cos_sim_loss.py | """ Symmetrized Negative Cosine Similarity Loss Functions """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import warnings
import torch
class SymNegCosineSimilarityLoss(torch.nn.Module):
    """Symmetrized negative cosine similarity loss from the SimSiam[0] paper.

    [0] SimSiam, 2020, https://arxiv.org/abs/2011.10566

    Examples:
        >>> # initialize loss function
        >>> loss_fn = SymNegCosineSimilarityLoss()
        >>>
        >>> # generate two random transforms of images
        >>> t0 = transforms(images)
        >>> t1 = transforms(images)
        >>>
        >>> # feed through SimSiam model
        >>> out0, out1 = model(t0, t1)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(out0, out1)
    """

    def __init__(self) -> None:
        super().__init__()
        warnings.warn(
            Warning(
                "SymNegCosineSimiliarityLoss will be deprecated in favor of "
                "NegativeCosineSimilarity in the future."
            ),
            DeprecationWarning,
        )

    def _neg_cosine_simililarity(self, x, y):
        # Gradient is blocked on y (stop-gradient as in the SimSiam paper).
        return -torch.nn.functional.cosine_similarity(x, y.detach(), dim=-1).mean()

    def forward(self, out0: torch.Tensor, out1: torch.Tensor):
        """Forward pass through Symmetric Loss.

        Args:
            out0:
                Output projections of the first set of transformed images.
                Expects the tuple to be of the form (z0, p0), where z0 is
                the output of the backbone and projection mlp, and p0 is the
                output of the prediction head.
            out1:
                Output projections of the second set of transformed images.
                Expects the tuple to be of the form (z1, p1), where z1 is
                the output of the backbone and projection mlp, and p1 is the
                output of the prediction head.

        Returns:
            Contrastive Cross Entropy Loss value.

        Raises:
            ValueError if shape of output is not multiple of batch_size.
        """
        z0, p0 = out0
        z1, p1 = out1

        # Each prediction is compared against the (detached) projection of the
        # other view; the two directions are averaged.
        loss_0 = self._neg_cosine_simililarity(p0, z1) / 2
        loss_1 = self._neg_cosine_simililarity(p1, z0) / 2
        return loss_0 + loss_1
| 2,308 | 28.987013 | 80 | py |
lightly | lightly-master/lightly/loss/tico_loss.py | import torch
import torch.distributed as dist
from lightly.utils.dist import gather
class TiCoLoss(torch.nn.Module):
    """Implementation of the Tico Loss from Tico[0] paper.

    This implementation takes inspiration from the code published
    by sayannag using Lightly. [1]

    [0] Jiachen Zhu et. al, 2022, Tico... https://arxiv.org/abs/2206.10698
    [1] https://github.com/sayannag/TiCo-pytorch

    Attributes:

    Args:
        beta:
            Coefficient for the EMA update of the covariance
            Defaults to 0.9 [0].
        rho:
            Weight for the covariance term of the loss
            Defaults to 20.0 [0].
        gather_distributed:
            If True then the cross-correlation matrices from all gpus are
            gathered and summed before the loss calculation.

    Examples:
        >>> # initialize loss function
        >>> loss_fn = TiCoLoss()
        >>>
        >>> # generate two random transforms of images
        >>> t0 = transforms(images)
        >>> t1 = transforms(images)
        >>>
        >>> # feed through model
        >>> out0, out1 = model(t0, t1)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(out0, out1)
    """

    def __init__(
        self,
        beta: float = 0.9,
        rho: float = 20.0,
        gather_distributed: bool = False,
    ):
        super().__init__()
        if gather_distributed and not dist.is_available():
            raise ValueError(
                "gather_distributed is True but torch.distributed is not available. "
                "Please set gather_distributed=False or install a torch version with "
                "distributed support."
            )
        self.beta = beta
        self.rho = rho
        self.gather_distributed = gather_distributed
        # EMA covariance estimate; created lazily on the first forward pass.
        self.C = None

    def forward(
        self,
        z_a: torch.Tensor,
        z_b: torch.Tensor,
        update_covariance_matrix: bool = True,
    ) -> torch.Tensor:
        """Computes the TiCo loss.

        Maximizes agreement between embeddings of two distorted versions of the
        same image while avoiding collapse via an EMA covariance penalty.

        Args:
            z_a:
                Tensor of shape [batch_size, num_features=256]. Output of the learned backbone.
            z_b:
                Tensor of shape [batch_size, num_features=256]. Output of the momentum updated backbone.
            update_covariance_matrix:
                Parameter to update the covariance matrix at each iteration.

        Returns:
            The loss.
        """
        assert (
            z_a.shape[0] > 1 and z_b.shape[0] > 1
        ), f"z_a and z_b must have batch size > 1 but found {z_a.shape[0]} and {z_b.shape[0]}"
        assert (
            z_a.shape == z_b.shape
        ), f"z_a and z_b must have same shape but found {z_a.shape} and {z_b.shape}."

        # Gather embeddings from all processes when running distributed.
        if self.gather_distributed and dist.is_initialized():
            if dist.get_world_size() > 1:
                z_a = torch.cat(gather(z_a), dim=0)
                z_b = torch.cat(gather(z_b), dim=0)

        # Project every embedding onto the unit sphere.
        z_a = torch.nn.functional.normalize(z_a, dim=1)
        z_b = torch.nn.functional.normalize(z_b, dim=1)

        # Batch estimate of the covariance of the online embeddings.
        B = torch.mm(z_a.T, z_a) / z_a.shape[0]
        if self.C is None:
            # Lazily initialize the EMA covariance with zeros of matching
            # shape, dtype and device.
            self.C = B.new_zeros(B.shape).detach()

        # EMA update of the covariance, then the two loss terms.
        C = self.beta * self.C + (1 - self.beta) * B
        invariance_term = (z_a * z_b).sum(dim=1).mean()
        transform_term = (torch.mm(z_a, C) * z_a).sum(dim=1).mean()
        loss = 1 - invariance_term + self.rho * transform_term

        if update_covariance_matrix:
            self.C = C.detach()

        return loss
| 3,887 | 30.609756 | 126 | py |
lightly | lightly-master/lightly/loss/vicreg_loss.py | import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch import Tensor
from lightly.utils.dist import gather
class VICRegLoss(torch.nn.Module):
    """Implementation of the VICReg loss [0].

    This implementation is based on the code published by the authors [1].

    - [0] VICReg, 2022, https://arxiv.org/abs/2105.04906
    - [1] https://github.com/facebookresearch/vicreg/

    Attributes:
        lambda_param:
            Scaling coefficient for the invariance term of the loss.
        mu_param:
            Scaling coefficient for the variance term of the loss.
        nu_param:
            Scaling coefficient for the covariance term of the loss.
        gather_distributed:
            If True then the cross-correlation matrices from all gpus are gathered and
            summed before the loss calculation.
        eps:
            Epsilon for numerical stability.

    Examples:
        >>> # initialize loss function
        >>> loss_fn = VICRegLoss()
        >>>
        >>> # generate two random transforms of images
        >>> t0 = transforms(images)
        >>> t1 = transforms(images)
        >>>
        >>> # feed through model
        >>> out0, out1 = model(t0, t1)
        >>>
        >>> # calculate loss
        >>> loss = loss_fn(out0, out1)
    """

    def __init__(
        self,
        lambda_param: float = 25.0,
        mu_param: float = 25.0,
        nu_param: float = 1.0,
        gather_distributed: bool = False,
        eps=0.0001,
    ):
        super().__init__()
        if gather_distributed and not dist.is_available():
            raise ValueError(
                "gather_distributed is True but torch.distributed is not available. "
                "Please set gather_distributed=False or install a torch version with "
                "distributed support."
            )
        self.lambda_param = lambda_param
        self.mu_param = mu_param
        self.nu_param = nu_param
        self.eps = eps
        self.gather_distributed = gather_distributed

    def forward(self, z_a: torch.Tensor, z_b: torch.Tensor) -> torch.Tensor:
        """Returns VICReg loss.

        Args:
            z_a:
                Tensor with shape (batch_size, ..., dim).
            z_b:
                Tensor with shape (batch_size, ..., dim).
        """
        assert (
            z_a.shape[0] > 1 and z_b.shape[0] > 1
        ), f"z_a and z_b must have batch size > 1 but found {z_a.shape[0]} and {z_b.shape[0]}"
        assert (
            z_a.shape == z_b.shape
        ), f"z_a and z_b must have same shape but found {z_a.shape} and {z_b.shape}."

        # The invariance term is computed on the local (non-gathered) batch.
        inv_loss = invariance_loss(x=z_a, y=z_b)

        # Variance and covariance terms use the full batch across processes.
        if self.gather_distributed and dist.is_initialized():
            if dist.get_world_size() > 1:
                z_a = torch.cat(gather(z_a), dim=0)
                z_b = torch.cat(gather(z_b), dim=0)

        var_loss = 0.5 * (
            variance_loss(x=z_a, eps=self.eps) + variance_loss(x=z_b, eps=self.eps)
        )
        cov_loss = covariance_loss(x=z_a) + covariance_loss(x=z_b)

        return (
            self.lambda_param * inv_loss
            + self.mu_param * var_loss
            + self.nu_param * cov_loss
        )
def invariance_loss(x: Tensor, y: Tensor) -> Tensor:
    """Returns VICReg invariance loss (mean squared error between views).

    Args:
        x:
            Tensor with shape (batch_size, ..., dim).
        y:
            Tensor with shape (batch_size, ..., dim).
    """
    return F.mse_loss(input=x, target=y)
def variance_loss(x: Tensor, eps: float = 0.0001) -> Tensor:
    """Returns VICReg variance loss.

    Penalizes dimensions whose (regularized) standard deviation over the
    batch falls below 1.

    Args:
        x:
            Tensor with shape (batch_size, ..., dim).
        eps:
            Epsilon for numerical stability.
    """
    centered = x - x.mean(dim=0)
    std = torch.sqrt(centered.var(dim=0) + eps)
    # Hinge: only deviations below the target std of 1 contribute.
    return torch.mean(F.relu(1.0 - std))
def covariance_loss(x: Tensor) -> Tensor:
    """Returns VICReg covariance loss.

    Generalized version of the covariance loss with support for tensors with more than
    two dimensions. Adapted from VICRegL:
    https://github.com/facebookresearch/VICRegL/blob/803ae4c8cd1649a820f03afb4793763e95317620/main_vicregl.py#L299

    Args:
        x:
            Tensor with shape (batch_size, ..., dim).
    """
    x = x - x.mean(dim=0)
    batch_size = x.size(0)
    dim = x.size(-1)
    # Mask with 1s on every off-diagonal entry of a (dim, dim) matrix.
    off_diagonal = ~torch.eye(dim, device=x.device, dtype=torch.bool)
    # Unbiased covariance estimate, shape (..., dim, dim).
    cov = torch.einsum("b...c,b...d->...cd", x, x) / (batch_size - 1)
    # Sum of squared off-diagonal covariances, normalized by the dimension.
    penalty = cov[..., off_diagonal].pow(2).sum(-1) / dim
    return penalty.mean()
| 4,817 | 30.285714 | 114 | py |
lightly | lightly-master/lightly/loss/vicregl_loss.py | from typing import Optional, Sequence, Tuple
import torch
import torch.distributed as dist
from torch import Tensor
from lightly.loss.vicreg_loss import (
VICRegLoss,
covariance_loss,
invariance_loss,
variance_loss,
)
from lightly.models.utils import nearest_neighbors
from lightly.utils.dist import gather
class VICRegLLoss(torch.nn.Module):
"""Implementation of the VICRegL loss from VICRegL paper [0].
This implementation follows the code published by the authors [1].
- [0]: VICRegL, 2022, https://arxiv.org/abs/2210.01571
- [1]: https://github.com/facebookresearch/VICRegL
Attributes:
lambda_param:
Coefficient for the invariance term of the loss.
mu_param:
Coefficient for the variance term of the loss.
nu_param:
Coefficient for the covariance term of the loss.
alpha:
Coefficient to weight global with local loss. The final loss is computed as
(self.alpha * global_loss + (1-self.alpha) * local_loss).
gather_distributed:
If True then the cross-correlation matrices from all gpus are gathered and
summed before the loss calculation.
eps:
Epsilon for numerical stability.
num_matches:
Number of local features to match using nearest neighbors.
Examples:
>>> # initialize loss function
>>> criterion = VICRegLLoss()
>>> transform = VICRegLTransform(n_global_views=2, n_local_views=4)
>>>
>>> # generate two random transforms of images
>>> views_and_grids = transform(images)
>>> views = views_and_grids[:6] # 2 global views + 4 local views
>>> grids = views_and_grids[6:]
>>>
>>> # feed through model images
>>> features = [model(view) for view in views]
>>>
>>> # calculate loss
>>> loss = criterion(
... global_view_features=features[:2],
... global_view_grids=grids[:2],
... local_view_features=features[2:],
... local_view_grids=grids[2:],
... )
"""
    def __init__(
        self,
        lambda_param: float = 25.0,
        mu_param: float = 25.0,
        nu_param: float = 1.0,
        alpha: float = 0.75,
        gather_distributed: bool = False,
        eps: float = 0.0001,
        num_matches: Tuple[int, int] = (20, 4),
    ):
        """Initializes the VICRegL loss; see the class docstring for parameters."""
        super(VICRegLLoss, self).__init__()
        self.alpha = alpha
        self.num_matches = num_matches
        self.lambda_param = lambda_param
        self.mu_param = mu_param
        self.nu_param = nu_param
        self.eps = eps
        self.gather_distributed = gather_distributed

        # Note: We multiply nu_param by 0.5 because the implementations of the VICReg
        # covariance loss differ by a factor of 0.5 between the original VICReg and
        # VICRegL codebases. See:
        # - VICReg: https://github.com/facebookresearch/vicreg/blob/4e12602fd495af83efd1631fbe82523e6db092e0/main_vicreg.py#L211-L213
        # - VICRegL: https://github.com/facebookresearch/VICRegL/blob/803ae4c8cd1649a820f03afb4793763e95317620/main_vicregl.py#L308-L312
        # The wrapped VICRegLoss is used for the global loss terms.
        self.vicreg_loss = VICRegLoss(
            lambda_param=lambda_param,
            mu_param=mu_param,
            nu_param=0.5 * nu_param,
            eps=eps,
            gather_distributed=gather_distributed,
        )
    def forward(
        self,
        global_view_features: Sequence[Tuple[Tensor, Tensor]],
        global_view_grids: Sequence[Tensor],
        local_view_features: Optional[Sequence[Tuple[Tensor, Tensor]]] = None,
        local_view_grids: Optional[Sequence[Tensor]] = None,
    ) -> Tensor:
        """Computes the global and local VICRegL loss from the input features.

        Args:
            global_view_features:
                Sequence of (global_features, local_features) tuples from the global
                crop views. global_features must have size
                (batch_size, global_feature_dim) and local_features must have size
                (batch_size, grid_height, grid_width, local_feature_dim).
            global_view_grids:
                Sequence of grid tensors from the global crop views. Every tensor must
                have shape (batch_size, grid_height, grid_width, 2).
            local_view_features:
                Sequence of (global_features, local_features) tuples from the local crop
                views. global_features must have size
                (batch_size, global_feature_dim) and local_features must have size
                (batch_size, grid_height, grid_width, local_feature_dim). Note that
                grid_height and grid_width can differ between global_view_features and
                local_view_features.
            local_view_grids:
                Sequence of grid tensors from the local crop views. Every tensor must
                have shape (batch_size, grid_height, grid_width, 2). Note that
                grid_height and grid_width can differ between global_view_features and
                local_view_features.

        Returns:
            Weighted sum of the global and local loss, calculated as:
            `self.alpha * global_loss + (1-self.alpha) * local_loss`.

        Raises:
            ValueError: If features and grids have mismatched lengths, or if only
                one of local_view_features / local_view_grids is provided.
        """
        # Validate that every view comes with a matching grid.
        if len(global_view_features) != len(global_view_grids):
            raise ValueError(
                f"global_view_features and global_view_grids must have same length "
                f"but found {len(global_view_features)} and {len(global_view_grids)}."
            )
        if local_view_features is not None and local_view_grids is not None:
            if len(local_view_features) != len(local_view_grids):
                raise ValueError(
                    f"local_view_features and local_view_grids must have same length "
                    f"but found {len(local_view_features)} and {len(local_view_grids)}."
                )
        elif local_view_features is not None or local_view_grids is not None:
            # Local features and grids must be provided together.
            raise ValueError(
                f"local_view_features and local_view_grids must either both be set or "
                f"None but found {type(local_view_features)} and {type(local_view_grids)}."
            )

        # calculate loss from global features
        global_loss = self._global_loss(
            global_view_features=global_view_features,
            local_view_features=local_view_features,
        )

        # calculate loss from local features
        local_loss = self._local_loss(
            global_view_features=global_view_features,
            global_view_grids=global_view_grids,
            local_view_features=local_view_features,
            local_view_grids=local_view_grids,
        )

        # alpha balances the global against the local objective
        loss = self.alpha * global_loss + (1 - self.alpha) * local_loss
        return loss
    def _global_loss(
        self,
        global_view_features: Sequence[Tuple[Tensor, Tensor]],
        local_view_features: Optional[Sequence[Tuple[Tensor, Tensor]]] = None,
    ) -> Tensor:
        """Returns global features loss.

        Combines the invariance, variance and covariance terms over the pooled
        (global) features with the class' lambda/mu/nu weights.
        """
        inv_loss = self._global_invariance_loss(
            global_view_features=global_view_features,
            local_view_features=local_view_features,
        )
        var_loss, cov_loss = self._global_variance_and_covariance_loss(
            global_view_features=global_view_features,
            local_view_features=local_view_features,
        )
        return (
            self.lambda_param * inv_loss
            + self.mu_param * var_loss
            + self.nu_param * cov_loss
        )
    def _global_invariance_loss(
        self,
        global_view_features: Sequence[Tuple[Tensor, Tensor]],
        local_view_features: Optional[Sequence[Tuple[Tensor, Tensor]]] = None,
    ) -> Tensor:
        """Returns invariance loss from global features.

        Averages the pairwise invariance loss between every global view and
        every *other* view (global or local).
        """
        loss = 0
        loss_count = 0
        for global_features_a, _ in global_view_features:
            # global views: compare against every other global view
            # (identity check skips pairing a view with itself)
            for global_features_b, _ in global_view_features:
                if global_features_a is not global_features_b:
                    loss += invariance_loss(global_features_a, global_features_b)
                    loss_count += 1

            # local views: every global view is compared to every local view
            if local_view_features is not None:
                for global_features_b, _ in local_view_features:
                    loss += invariance_loss(global_features_a, global_features_b)
                    loss_count += 1
        return loss / loss_count
    def _global_variance_and_covariance_loss(
        self,
        global_view_features: Sequence[Tuple[Tensor, Tensor]],
        local_view_features: Optional[Sequence[Tuple[Tensor, Tensor]]] = None,
    ) -> Tuple[Tensor, Tensor]:
        """Returns variance and covariance loss from global features.

        Both terms are computed per view (over the optionally gathered batch)
        and averaged over all views.
        """
        view_features = list(global_view_features)
        if local_view_features is not None:
            view_features = view_features + list(local_view_features)

        var_loss = 0
        cov_loss = 0
        loss_count = 0
        for global_features, _ in view_features:
            # Gather the batch from all processes before computing statistics.
            if self.gather_distributed and dist.is_initialized():
                world_size = dist.get_world_size()
                if world_size > 1:
                    global_features = torch.cat(gather(global_features), dim=0)

            var_loss += variance_loss(x=global_features, eps=self.eps)
            cov_loss += covariance_loss(x=global_features)
            loss_count += 1
        return var_loss / loss_count, cov_loss / loss_count
    def _local_loss(
        self,
        global_view_features: Sequence[Tuple[Tensor, Tensor]],
        global_view_grids: Sequence[Tensor],
        local_view_features: Optional[Sequence[Tuple[Tensor, Tensor]]] = None,
        local_view_grids: Optional[Sequence[Tensor]] = None,
    ) -> Tensor:
        """Returns loss from local features based on nearest neighbor matching.
        Note: Our nearest neighbor implementation returns the selected features sorted
        by increasing matching distance, whereas the implementation by the VICRegL
        authors returns features in a different order [1]. This results in slight
        differences of the final local loss. The difference results from feature
        centering which depends on the order.
        Note: Nearest neighbor matching slightly differs between the paper [0] and the
        original implementation of the authors [1]. The paper mentions that
        num_matches is set to 20 for global views and 4 for local views. The code
        uses 20 matches for the first NN search and 4 matches for the second search,
        regardless of global or local views:
        https://github.com/facebookresearch/VICRegL/blob/803ae4c8cd1649a820f03afb4793763e95317620/main_vicregl.py#L329-L334
        Our implementation follows the original code and ignores view type.
        Args:
            global_view_features:
                Sequence of (global_features, local_features) tuples, one per
                global view. Only the local features are used in this method.
            global_view_grids:
                Sequence of location grids, one per global view.
            local_view_features:
                Optional sequence of (global_features, local_features) tuples,
                one per local view.
            local_view_grids:
                Optional sequence of location grids, one per local view.
        Returns:
            The mean local loss over all view pairs.
        """
        loss = 0
        loss_count = 0
        for (_, z_a_local_features), grid_a in zip(
            global_view_features, global_view_grids
        ):
            # global views
            # Each view pair contributes an L2-based matching loss plus a
            # location-based matching loss; both are counted as one pair.
            for (_, z_b_local_features), grid_b in zip(
                global_view_features, global_view_grids
            ):
                if z_a_local_features is not z_b_local_features:
                    loss += self._local_l2_loss(
                        z_a=z_a_local_features,
                        z_b=z_b_local_features,
                    )
                    loss += self._local_location_loss(
                        z_a=z_a_local_features,
                        z_b=z_b_local_features,
                        grid_a=grid_a,
                        grid_b=grid_b,
                    )
                    loss_count += 1
            # local views
            if local_view_features is not None and local_view_grids is not None:
                for (_, z_b_local_features), grid_b in zip(
                    local_view_features, local_view_grids
                ):
                    loss += self._local_l2_loss(
                        z_a=z_a_local_features,
                        z_b=z_b_local_features,
                    )
                    loss += self._local_location_loss(
                        z_a=z_a_local_features,
                        z_b=z_b_local_features,
                        grid_a=grid_a,
                        grid_b=grid_b,
                    )
                    loss_count += 1
        return loss / loss_count
    def _local_l2_loss(
        self,
        z_a: Tensor,
        z_b: Tensor,
    ) -> Tensor:
        """Returns loss for local features matched with nearest neighbors using L2
        distance in the feature space.
        Args:
            z_a:
                Local feature tensor with shape (batch_size, height, width, dim).
            z_b:
                Local feature tensor with shape (batch_size, height, width, dim).
        Returns:
            The mean of the two VICReg losses computed on the nearest neighbor
            matches in both directions (z_a -> z_b and z_b -> z_a).
        """
        # (batch_size, height, width, dim) -> (batch_size, height * width, dim)
        z_a = z_a.flatten(start_dim=1, end_dim=2)
        z_b = z_b.flatten(start_dim=1, end_dim=2)
        # Match in both directions; the number of matches can differ per
        # direction (see num_matches note in _local_loss).
        z_a_filtered, z_a_nn = self._nearest_neighbors_on_l2(
            input_features=z_a, candidate_features=z_b, num_matches=self.num_matches[0]
        )
        z_b_filtered, z_b_nn = self._nearest_neighbors_on_l2(
            input_features=z_b, candidate_features=z_a, num_matches=self.num_matches[1]
        )
        loss_a = self.vicreg_loss.forward(z_a=z_a_filtered, z_b=z_a_nn)
        loss_b = self.vicreg_loss.forward(z_a=z_b_filtered, z_b=z_b_nn)
        return 0.5 * (loss_a + loss_b)
    def _local_location_loss(
        self,
        z_a: Tensor,
        z_b: Tensor,
        grid_a: Tensor,
        grid_b: Tensor,
    ) -> Tensor:
        """Returns loss for local features matched with nearest neighbors based on
        the feature location.
        Args:
            z_a:
                Local feature tensor with shape (batch_size, height, width, dim).
            z_b:
                Local feature tensor with shape (batch_size, height, width, dim).
                Note that height and width can be different from z_a.
            grid_a:
                Grid tensor with shape (batch_size, height, width, 2).
            grid_b:
                Grid tensor with shape (batch_size, height, width, 2).
                Note that height and width can be different from grid_a.
        Returns:
            The mean of the two VICReg losses computed on the nearest neighbor
            matches in both directions (a -> b and b -> a).
        """
        # (batch_size, height, width, dim) -> (batch_size, height * width, dim)
        z_a = z_a.flatten(start_dim=1, end_dim=2)
        z_b = z_b.flatten(start_dim=1, end_dim=2)
        # (batch_size, height, width, 2) -> (batch_size, height * width, 2)
        grid_a = grid_a.flatten(start_dim=1, end_dim=2)
        grid_b = grid_b.flatten(start_dim=1, end_dim=2)
        # Neighbors are selected by grid (location) distance, but the matched
        # values are the features at those locations.
        z_a_filtered, z_a_nn = self._nearest_neighbors_on_grid(
            input_features=z_a,
            candidate_features=z_b,
            input_grid=grid_a,
            candidate_grid=grid_b,
            num_matches=self.num_matches[0],
        )
        z_b_filtered, z_b_nn = self._nearest_neighbors_on_grid(
            input_features=z_b,
            candidate_features=z_a,
            input_grid=grid_b,
            candidate_grid=grid_a,
            num_matches=self.num_matches[1],
        )
        loss_a = self.vicreg_loss.forward(z_a=z_a_filtered, z_b=z_a_nn)
        loss_b = self.vicreg_loss.forward(z_a=z_b_filtered, z_b=z_b_nn)
        return 0.5 * (loss_a + loss_b)
def _nearest_neighbors_on_l2(
self, input_features: Tensor, candidate_features: Tensor, num_matches: int
) -> Tuple[Tensor, Tensor]:
"""Finds num_matches closest neighbors of input_features in candidate_features.
Args:
input_features:
Local features tensor with shape (batch_size, height * width, dim).
candidate_features:
Local features tensor with shape (batch_size, height * width, dim).
Note that height and width can be different from input_features.
Returns:
(nn_input, nn_candidate) tuple containing two tensors with shape
(batch_size, num_matches, dim).
"""
distances = torch.cdist(input_features, candidate_features)
return nearest_neighbors(
input_features, candidate_features, distances, num_matches
)
    def _nearest_neighbors_on_grid(
        self,
        input_features: Tensor,
        candidate_features: Tensor,
        input_grid: Tensor,
        candidate_grid: Tensor,
        num_matches: int,
    ) -> Tuple[Tensor, Tensor]:
        """Finds num_matches closest neighbors of input_features in candidate_features
        based on the distance between the features defined by input_grid and
        candidate_grid.
        Args:
            input_features:
                Local features tensor with shape (batch_size, height * width, dim).
            candidate_features:
                Local features tensor with shape (batch_size, height * width, dim).
                Note that height and width can be different from input_features.
            input_grid:
                Grid tensor with shape (batch_size, height, width, 2).
            candidate_grid:
                Grid tensor with shape (batch_size, height, width, 2). Note that height
                and width can be different from input_grid.
            num_matches:
                Number of nearest neighbor matches to select.
        Returns:
            (nn_input, nn_candidate) tuple containing two tensors with shape
            (batch_size, num_matches, dim).
        """
        # Distances come from the grid positions, while the matched values are
        # the features at those positions.
        distances = torch.cdist(input_grid, candidate_grid)
        return nearest_neighbors(
            input_features, candidate_features, distances, num_matches
        )
| 17,701 | 40.75 | 136 | py |
lightly | lightly-master/lightly/loss/regularizer/__init__.py | """The lightly.loss.regularizer package provides regularizers for self-supervised learning. """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from lightly.loss.regularizer.co2 import CO2Regularizer
| 230 | 27.875 | 95 | py |
lightly | lightly-master/lightly/loss/regularizer/co2.py | """ CO2 Regularizer """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import torch
from lightly.loss.memory_bank import MemoryBankModule
class CO2Regularizer(MemoryBankModule):
    """Implementation of the CO2 regularizer [0] for self-supervised learning.
    [0] CO2, 2021, https://arxiv.org/abs/2010.02217
    Attributes:
        alpha:
            Weight of the regularization term.
        t_consistency:
            Temperature used during softmax calculations.
        memory_bank_size:
            Number of negative samples to store in the memory bank.
            Use 0 to use the second batch for negative samples.
    Examples:
        >>> # initialize loss function for MoCo
        >>> loss_fn = NTXentLoss(memory_bank_size=4096)
        >>>
        >>> # initialize CO2 regularizer
        >>> co2 = CO2Regularizer(alpha=1.0, memory_bank_size=4096)
        >>>
        >>> # generate two random transforms of images
        >>> t0 = transforms(images)
        >>> t1 = transforms(images)
        >>>
        >>> # feed through the MoCo model
        >>> out0, out1 = model(t0, t1)
        >>>
        >>> # calculate loss and apply regularizer
        >>> loss = loss_fn(out0, out1) + co2(out0, out1)
    """
    def __init__(
        self, alpha: float = 1, t_consistency: float = 0.05, memory_bank_size: int = 0
    ):
        super(CO2Regularizer, self).__init__(size=memory_bank_size)
        # try-catch the KLDivLoss construction for backwards compatibility:
        # older torch versions do not support the log_target argument.
        self.log_target = True
        try:
            self.kl_div = torch.nn.KLDivLoss(reduction="batchmean", log_target=True)
        except TypeError:
            self.log_target = False
            self.kl_div = torch.nn.KLDivLoss(reduction="batchmean")
        self.t_consistency = t_consistency
        self.alpha = alpha
    def _get_pseudo_labels(
        self, out0: torch.Tensor, out1: torch.Tensor, negatives: torch.Tensor = None
    ) -> torch.Tensor:
        """Computes the soft pseudo labels across negative samples.
        Args:
            out0:
                Output projections of the first set of transformed images (query).
                Shape: bsz x n_ftrs
            out1:
                Output projections of the second set of transformed images (positive sample).
                Shape: bsz x n_ftrs
            negatives:
                Negative samples to compare against. If this is None, the second
                batch of images will be used as negative samples.
                Shape: memory_bank_size x n_ftrs
        Returns:
            Log probability that a positive samples will classify each negative
            sample as the positive sample.
            Shape: bsz x (bsz - 1) or bsz x memory_bank_size
        """
        batch_size, _ = out0.shape
        if negatives is None:
            # use second batch as negative samples
            # l_pos has shape bsz x 1 and l_neg has shape bsz x bsz
            l_pos = torch.einsum("nc,nc->n", [out0, out1]).unsqueeze(-1)
            l_neg = torch.einsum("nc,ck->nk", [out0, out1.t()])
            # remove elements on the diagonal (each sample paired with itself)
            # l_neg has shape bsz x (bsz - 1)
            l_neg = l_neg.masked_select(
                ~torch.eye(batch_size, dtype=bool, device=l_neg.device)
            ).view(batch_size, batch_size - 1)
        else:
            # use memory bank as negative samples
            # l_pos has shape bsz x 1 and l_neg has shape bsz x memory_bank_size
            negatives = negatives.to(out0.device)
            l_pos = torch.einsum("nc,nc->n", [out0, out1]).unsqueeze(-1)
            l_neg = torch.einsum("nc,ck->nk", [out0, negatives.clone().detach()])
        # concatenate such that positive samples are at index 0
        logits = torch.cat([l_pos, l_neg], dim=1)
        # divide by temperature
        logits = logits / self.t_consistency
        # the input to kl_div is expected to be log(p)
        return torch.nn.functional.log_softmax(logits, dim=-1)
    def forward(self, out0: torch.Tensor, out1: torch.Tensor) -> torch.Tensor:
        """Computes the CO2 regularization term for two model outputs.
        Args:
            out0:
                Output projections of the first set of transformed images.
            out1:
                Output projections of the second set of transformed images.
        Returns:
            The regularization term multiplied by the weight factor alpha.
        """
        # normalize the output to length 1
        out0 = torch.nn.functional.normalize(out0, dim=1)
        out1 = torch.nn.functional.normalize(out1, dim=1)
        # ask memory bank for negative samples and extend it with out1 if
        # out1 requires a gradient, otherwise keep the same vectors in the
        # memory bank (this allows for keeping the memory bank constant e.g.
        # for evaluating the loss on the test set)
        # if the memory_bank size is 0, negatives will be None
        out1, negatives = super(CO2Regularizer, self).forward(out1, update=True)
        # get log probabilities
        p = self._get_pseudo_labels(out0, out1, negatives)
        q = self._get_pseudo_labels(out1, out0, negatives)
        # calculate symmetrized kullback leibler divergence
        if self.log_target:
            div = self.kl_div(p, q) + self.kl_div(q, p)
        else:
            # can't use log_target because of early torch version;
            # exponentiate the target back to probability space instead
            div = self.kl_div(p, torch.exp(q)) + self.kl_div(q, torch.exp(p))
        return self.alpha * 0.5 * div
| 5,522 | 37.089655 | 93 | py |
lightly | lightly-master/lightly/models/__init__.py | """The lightly.models package provides model implementations.
Note that the high-level building blocks will be deprecated with
lightly version 1.3.0. Instead, use low-level building blocks to build the
models yourself.
Example implementations for all models can be found here:
`Model Examples <https://docs.lightly.ai/self-supervised-learning/examples/models.html>`_
The package contains an implementation of the commonly used ResNet and
adaptations of the architecture which make self-supervised learning simpler.
The package also hosts the Lightly model zoo - a list of downloadable ResNet
checkpoints.
"""
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from lightly.models import utils
from lightly.models.barlowtwins import BarlowTwins
from lightly.models.byol import BYOL
from lightly.models.moco import MoCo
from lightly.models.nnclr import NNCLR
from lightly.models.resnet import ResNetGenerator
from lightly.models.simclr import SimCLR
from lightly.models.simsiam import SimSiam
from lightly.models.zoo import ZOO, checkpoints
| 1,071 | 34.733333 | 89 | py |
lightly | lightly-master/lightly/models/_momentum.py | """ Momentum Encoder """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import copy
import torch
import torch.nn as nn
def _deactivate_requires_grad(params):
"""Deactivates the requires_grad flag for all parameters."""
for param in params:
param.requires_grad = False
def _do_momentum_update(prev_params, params, m):
"""Updates the weights of the previous parameters."""
for prev_param, param in zip(prev_params, params):
prev_param.data = prev_param.data * m + param.data * (1.0 - m)
class _MomentumEncoderMixin:
    """Mixin to provide momentum encoder functionalities.
    Provides the following functionalities:
    - Momentum encoder initialization.
    - Momentum updates.
    - Batch shuffling and unshuffling.
    To make use of the mixin, simply inherit from it:
    >>> class MyMoCo(nn.Module, _MomentumEncoderMixin):
    >>>
    >>>     def __init__(self, backbone):
    >>>         super(MyMoCo, self).__init__()
    >>>
    >>>         self.backbone = backbone
    >>>         self.projection_head = get_projection_head()
    >>>
    >>>         # initialize momentum_backbone and momentum_projection_head
    >>>         self._init_momentum_encoder()
    >>>
    >>>     def forward(self, x: torch.Tensor):
    >>>
    >>>         # do the momentum update
    >>>         self._momentum_update(0.999)
    >>>
    >>>         # use momentum backbone
    >>>         y = self.momentum_backbone(x)
    >>>         y = self.momentum_projection_head(y)
    """
    # These attributes must be provided by the inheriting class; the momentum
    # modules are created by _init_momentum_encoder().
    m: float
    backbone: nn.Module
    projection_head: nn.Module
    momentum_backbone: nn.Module
    momentum_projection_head: nn.Module
    def _init_momentum_encoder(self):
        """Initializes momentum backbone and a momentum projection head."""
        assert self.backbone is not None
        assert self.projection_head is not None
        # Deep copies so the momentum modules start with identical weights but
        # evolve only through momentum updates, never through gradients.
        self.momentum_backbone = copy.deepcopy(self.backbone)
        self.momentum_projection_head = copy.deepcopy(self.projection_head)
        _deactivate_requires_grad(self.momentum_backbone.parameters())
        _deactivate_requires_grad(self.momentum_projection_head.parameters())
    @torch.no_grad()
    def _momentum_update(self, m: float = 0.999):
        """Performs the momentum update for the backbone and projection head.
        Args:
            m:
                Momentum factor; momentum weights are updated as
                m * momentum_weight + (1 - m) * weight.
        """
        _do_momentum_update(
            self.momentum_backbone.parameters(),
            self.backbone.parameters(),
            m=m,
        )
        _do_momentum_update(
            self.momentum_projection_head.parameters(),
            self.projection_head.parameters(),
            m=m,
        )
    @torch.no_grad()
    def _batch_shuffle(self, batch: torch.Tensor):
        """Returns the shuffled batch and the indices to undo."""
        batch_size = batch.shape[0]
        shuffle = torch.randperm(batch_size, device=batch.device)
        return batch[shuffle], shuffle
    @torch.no_grad()
    def _batch_unshuffle(self, batch: torch.Tensor, shuffle: torch.Tensor):
        """Returns the unshuffled batch."""
        # argsort of the permutation yields its inverse
        unshuffle = torch.argsort(shuffle)
        return batch[unshuffle]
| 3,144 | 30.767677 | 80 | py |
lightly | lightly-master/lightly/models/barlowtwins.py | """ Barlow Twins resnet-based Model [0]
[0] Zbontar,J. et.al. 2021. Barlow Twins... https://arxiv.org/abs/2103.03230
"""
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import warnings
import torch
import torch.nn as nn
from lightly.models.modules import BarlowTwinsProjectionHead
class BarlowTwins(nn.Module):
    """Implementation of BarlowTwins[0] network.
    Recommended loss: :py:class:`lightly.loss.barlow_twins_loss.BarlowTwinsLoss`
    Default params are the ones explained in the original paper [0].
    [0] Zbontar,J. et.al. 2021. Barlow Twins... https://arxiv.org/abs/2103.03230
    Attributes:
        backbone:
            Backbone model to extract features from images.
            ResNet-50 in original paper [0].
        num_ftrs:
            Dimension of the embedding (before the projection head).
        proj_hidden_dim:
            Dimension of the hidden layer of the projection head. This should
            be the same size as `num_ftrs`.
        out_dim:
            Dimension of the output (after the projection head).
    """
    def __init__(
        self,
        backbone: nn.Module,
        num_ftrs: int = 2048,
        proj_hidden_dim: int = 8192,
        out_dim: int = 8192,
    ):
        super(BarlowTwins, self).__init__()
        self.backbone = backbone
        self.num_ftrs = num_ftrs
        self.proj_hidden_dim = proj_hidden_dim
        self.out_dim = out_dim
        self.projection_mlp = BarlowTwinsProjectionHead(
            num_ftrs, proj_hidden_dim, out_dim
        )
        warnings.warn(
            Warning(
                "The high-level building block BarlowTwins will be deprecated in version 1.3.0. "
                + "Use low-level building blocks instead. "
                + "See https://docs.lightly.ai/self-supervised-learning/lightly.models.html for more information"
            ),
            DeprecationWarning,
        )
    def forward(
        self, x0: torch.Tensor, x1: torch.Tensor = None, return_features: bool = False
    ):
        """Forward pass through BarlowTwins.
        Extracts features with the backbone and applies the projection
        head to the output space. If both x0 and x1 are not None, both will be
        passed through the backbone and projection. If x1 is None, only x0 will
        be forwarded.
        Barlow Twins only implements a projection head, unlike SimSiam.
        Args:
            x0:
                Tensor of shape bsz x channels x W x H.
            x1:
                Tensor of shape bsz x channels x W x H.
            return_features:
                Whether or not to return the intermediate features backbone(x).
        Returns:
            The output projection of x0 and (if x1 is not None)
            the output projection of x1. If return_features is
            True, the output for each x is a tuple (out, f) where f are the
            features before the projection head.
        Examples:
            >>> # single input, single output
            >>> out = model(x)
            >>>
            >>> # single input with return_features=True
            >>> out, f = model(x, return_features=True)
            >>>
            >>> # two inputs, two outputs
            >>> out0, out1 = model(x0, x1)
            >>>
            >>> # two inputs, two outputs with return_features=True
            >>> (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
        """
        # forward pass first input
        f0 = self.backbone(x0).flatten(start_dim=1)
        out0 = self.projection_mlp(f0)
        # append features if requested
        if return_features:
            out0 = (out0, f0)
        if x1 is None:
            return out0
        # forward pass second input
        f1 = self.backbone(x1).flatten(start_dim=1)
        out1 = self.projection_mlp(f1)
        # append features if requested
        if return_features:
            out1 = (out1, f1)
        return out0, out1
| 3,976 | 31.333333 | 113 | py |
lightly | lightly-master/lightly/models/batchnorm.py | """ SplitBatchNorm Implementation """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import torch
import torch.nn as nn
class SplitBatchNorm(nn.BatchNorm2d):
    """Simulates multi-gpu behaviour of BatchNorm in one gpu by splitting.
    Implementation was adapted from:
    https://github.com/davidcpage/cifar10-fast/blob/master/torch_backend.py
    Attributes:
        num_features:
            Number of input features.
        num_splits:
            Number of splits.
    """
    def __init__(self, num_features, num_splits, **kw):
        super().__init__(num_features, **kw)
        self.num_splits = num_splits
        # Re-register the running statistics with one set of stats per split,
        # replacing the buffers created by nn.BatchNorm2d.
        self.register_buffer(
            "running_mean", torch.zeros(num_features * self.num_splits)
        )
        self.register_buffer("running_var", torch.ones(num_features * self.num_splits))
    def train(self, mode=True):
        """Sets the module in training or evaluation mode.

        When switching from training to evaluation, the per-split running
        statistics are averaged into a single estimate that is then repeated
        for every split.
        """
        # lazily collate stats when we are going to use them
        if (self.training is True) and (mode is False):
            self.running_mean = torch.mean(
                self.running_mean.view(self.num_splits, self.num_features), dim=0
            ).repeat(self.num_splits)
            self.running_var = torch.mean(
                self.running_var.view(self.num_splits, self.num_features), dim=0
            ).repeat(self.num_splits)
        return super().train(mode)
    def forward(self, input):
        """Computes the SplitBatchNorm on the input."""
        # get input shape
        N, C, H, W = input.shape
        # during training, use different stats for each split and otherwise
        # use the stats from the first split
        if self.training or not self.track_running_stats:
            # Reshape to (N / num_splits, C * num_splits, H, W) so each split
            # is normalized with its own statistics in a single batch_norm call.
            result = nn.functional.batch_norm(
                input.view(-1, C * self.num_splits, H, W),
                self.running_mean,
                self.running_var,
                self.weight.repeat(self.num_splits),
                self.bias.repeat(self.num_splits),
                True,
                self.momentum,
                self.eps,
            ).view(N, C, H, W)
        else:
            result = nn.functional.batch_norm(
                input,
                self.running_mean[: self.num_features],
                self.running_var[: self.num_features],
                self.weight,
                self.bias,
                False,
                self.momentum,
                self.eps,
            )
        return result
def get_norm_layer(num_features: int, num_splits: int, **kw):
    """Utility to switch between BatchNorm2d and SplitBatchNorm.

    Args:
        num_features:
            Number of input features.
        num_splits:
            Number of splits for SplitBatchNorm. If 0, a regular
            nn.BatchNorm2d is returned instead.
        **kw:
            Additional keyword arguments forwarded to the norm layer
            constructor (e.g. eps, momentum).

    Returns:
        A SplitBatchNorm layer if num_splits > 0, otherwise an
        nn.BatchNorm2d layer.
    """
    # Bug fix: previously **kw was accepted but silently dropped; forward it
    # so constructor options like eps/momentum actually take effect.
    if num_splits > 0:
        return SplitBatchNorm(num_features, num_splits, **kw)
    else:
        return nn.BatchNorm2d(num_features, **kw)
| 2,714 | 31.710843 | 87 | py |
lightly | lightly-master/lightly/models/byol.py | """ BYOL Model """
# Copyright (c) 2021. Lightly AG and its affiliates.
# All Rights Reserved
import warnings
import torch
import torch.nn as nn
from lightly.models._momentum import _MomentumEncoderMixin
from lightly.models.modules import BYOLProjectionHead
def _get_byol_mlp(num_ftrs: int, hidden_dim: int, out_dim: int):
"""Returns a 2-layer MLP with batch norm on the hidden layer.
Reference (12.03.2021)
https://arxiv.org/abs/2006.07733
"""
modules = [
nn.Linear(num_ftrs, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, out_dim),
]
return nn.Sequential(*modules)
class BYOL(nn.Module, _MomentumEncoderMixin):
    """Implementation of the BYOL architecture.
    Attributes:
        backbone:
            Backbone model to extract features from images.
        num_ftrs:
            Dimension of the embedding (before the projection mlp).
        hidden_dim:
            Dimension of the hidden layer in the projection and prediction mlp.
        out_dim:
            Dimension of the output (after the projection/prediction mlp).
        m:
            Momentum for the momentum update of encoder.
    """
    def __init__(
        self,
        backbone: nn.Module,
        num_ftrs: int = 2048,
        hidden_dim: int = 4096,
        out_dim: int = 256,
        m: float = 0.9,
    ):
        super(BYOL, self).__init__()
        self.backbone = backbone
        # the architecture of the projection and prediction head is the same
        self.projection_head = BYOLProjectionHead(num_ftrs, hidden_dim, out_dim)
        self.prediction_head = BYOLProjectionHead(out_dim, hidden_dim, out_dim)
        self.momentum_backbone = None
        self.momentum_projection_head = None
        # creates momentum copies of backbone and projection head (see
        # _MomentumEncoderMixin)
        self._init_momentum_encoder()
        self.m = m
        warnings.warn(
            Warning(
                "The high-level building block BYOL will be deprecated in version 1.3.0. "
                + "Use low-level building blocks instead. "
                + "See https://docs.lightly.ai/self-supervised-learning/lightly.models.html for more information"
            ),
            DeprecationWarning,
        )
    def _forward(self, x0: torch.Tensor, x1: torch.Tensor = None):
        """Forward pass through the encoder and the momentum encoder.
        Performs the momentum update, extracts features with the backbone and
        applies the projection (and prediction) head to the output space. If
        x1 is None, only x0 will be processed otherwise, x0 is processed with
        the encoder and x1 with the momentum encoder.
        Args:
            x0:
                Tensor of shape bsz x channels x W x H.
            x1:
                Tensor of shape bsz x channels x W x H.
        Returns:
            The output projection of x0 and (if x1 is not None) the output
            projection of x1.
        Examples:
            >>> # single input, single output
            >>> out = model._forward(x)
            >>>
            >>> # two inputs, two outputs
            >>> out0, out1 = model._forward(x0, x1)
        """
        self._momentum_update(self.m)
        # forward pass of first input x0
        f0 = self.backbone(x0).flatten(start_dim=1)
        z0 = self.projection_head(f0)
        out0 = self.prediction_head(z0)
        if x1 is None:
            return out0
        # forward pass of second input x1 (momentum branch, no gradients)
        with torch.no_grad():
            f1 = self.momentum_backbone(x1).flatten(start_dim=1)
            out1 = self.momentum_projection_head(f1)
        return out0, out1
    def forward(
        self, x0: torch.Tensor, x1: torch.Tensor, return_features: bool = False
    ):
        """Symmetrizes the forward pass (see _forward).
        Performs two forward passes, once where x0 is passed through the encoder
        and x1 through the momentum encoder and once the other way around.
        Note that this model currently requires two inputs for the forward pass
        (x0 and x1) which correspond to the two augmentations.
        Furthermore, the `return_features` argument does not work yet.
        Args:
            x0:
                Tensor of shape bsz x channels x W x H.
            x1:
                Tensor of shape bsz x channels x W x H.
        Returns:
            A tuple out0, out1, where out0 and out1 are tuples containing the
            predictions and projections of x0 and x1: out0 = (z0, p0) and
            out1 = (z1, p1).
        Examples:
            >>> # initialize the model and the loss function
            >>> model = BYOL()
            >>> criterion = SymNegCosineSimilarityLoss()
            >>>
            >>> # forward pass for two batches of transformed images x1 and x2
            >>> out0, out1 = model(x0, x1)
            >>> loss = criterion(out0, out1)
        """
        if x0 is None:
            raise ValueError("x0 must not be None!")
        if x1 is None:
            raise ValueError("x1 must not be None!")
        if not all([s0 == s1 for s0, s1 in zip(x0.shape, x1.shape)]):
            raise ValueError(
                f"x0 and x1 must have same shape but got shapes {x0.shape} and {x1.shape}!"
            )
        # symmetric pass: each view is once the online branch (prediction) and
        # once the momentum branch (projection)
        p0, z1 = self._forward(x0, x1)
        p1, z0 = self._forward(x1, x0)
        return (z0, p0), (z1, p1)
| 5,349 | 30.845238 | 113 | py |
lightly | lightly-master/lightly/models/moco.py | """ MoCo Model """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import warnings
import torch
import torch.nn as nn
from lightly.models._momentum import _MomentumEncoderMixin
from lightly.models.modules import MoCoProjectionHead
class MoCo(nn.Module, _MomentumEncoderMixin):
    """Implementation of the MoCo (Momentum Contrast)[0] architecture.
    Recommended loss: :py:class:`lightly.loss.ntx_ent_loss.NTXentLoss` with
    a memory bank.
    [0] MoCo, 2020, https://arxiv.org/abs/1911.05722
    Attributes:
        backbone:
            Backbone model to extract features from images.
        num_ftrs:
            Dimension of the embedding (before the projection head).
        out_dim:
            Dimension of the output (after the projection head).
        m:
            Momentum for momentum update of the key-encoder.
        batch_shuffle:
            If True, the second input batch is shuffled before the momentum
            encoder forward pass and unshuffled afterwards (for batch norm).
    """
    def __init__(
        self,
        backbone: nn.Module,
        num_ftrs: int = 32,
        out_dim: int = 128,
        m: float = 0.999,
        batch_shuffle: bool = False,
    ):
        super(MoCo, self).__init__()
        self.backbone = backbone
        self.projection_head = MoCoProjectionHead(num_ftrs, num_ftrs, out_dim)
        # NOTE(review): "momentum_features" looks like a leftover; the mixin's
        # _init_momentum_encoder() below sets momentum_backbone instead —
        # confirm this attribute is intentional.
        self.momentum_features = None
        self.momentum_projection_head = None
        self.m = m
        self.batch_shuffle = batch_shuffle
        # initialize momentum features and momentum projection head
        self._init_momentum_encoder()
        warnings.warn(
            Warning(
                "The high-level building block MoCo will be deprecated in version 1.3.0. "
                + "Use low-level building blocks instead. "
                + "See https://docs.lightly.ai/self-supervised-learning/lightly.models.html for more information"
            ),
            DeprecationWarning,
        )
    def forward(
        self, x0: torch.Tensor, x1: torch.Tensor = None, return_features: bool = False
    ):
        """Embeds and projects the input image.
        Performs the momentum update, extracts features with the backbone and
        applies the projection head to the output space. If both x0 and x1 are
        not None, both will be passed through the backbone and projection head.
        If x1 is None, only x0 will be forwarded.
        Args:
            x0:
                Tensor of shape bsz x channels x W x H.
            x1:
                Tensor of shape bsz x channels x W x H.
            return_features:
                Whether or not to return the intermediate features backbone(x).
        Returns:
            The output projection of x0 and (if x1 is not None) the output
            projection of x1. If return_features is True, the output for each x
            is a tuple (out, f) where f are the features before the projection
            head.
        Examples:
            >>> # single input, single output
            >>> out = model(x)
            >>>
            >>> # single input with return_features=True
            >>> out, f = model(x, return_features=True)
            >>>
            >>> # two inputs, two outputs
            >>> out0, out1 = model(x0, x1)
            >>>
            >>> # two inputs, two outputs with return_features=True
            >>> (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
        """
        self._momentum_update(self.m)
        # forward pass of first input x0 (query branch, receives gradients)
        f0 = self.backbone(x0).flatten(start_dim=1)
        out0 = self.projection_head(f0)
        # append features if requested
        if return_features:
            out0 = (out0, f0)
        # return out0 if x1 is None
        if x1 is None:
            return out0
        # forward pass of second input x1 (key branch, no gradients)
        with torch.no_grad():
            # shuffle for batchnorm
            if self.batch_shuffle:
                x1, shuffle = self._batch_shuffle(x1)
            # run x1 through momentum encoder
            f1 = self.momentum_backbone(x1).flatten(start_dim=1)
            out1 = self.momentum_projection_head(f1).detach()
            # unshuffle for batchnorm
            if self.batch_shuffle:
                f1 = self._batch_unshuffle(f1, shuffle)
                out1 = self._batch_unshuffle(out1, shuffle)
            # append features if requested
            if return_features:
                out1 = (out1, f1)
        return out0, out1
| 4,373 | 30.927007 | 113 | py |
lightly | lightly-master/lightly/models/nnclr.py | """ NNCLR Model """
# Copyright (c) 2021. Lightly AG and its affiliates.
# All Rights Reserved
import warnings
import torch
import torch.nn as nn
from lightly.models.modules import NNCLRPredictionHead, NNCLRProjectionHead
def _prediction_mlp(in_dims: int, h_dims: int, out_dims: int) -> nn.Sequential:
"""Prediction MLP. The original paper's implementation has 2 layers, with
BN applied to its hidden fc layers but no BN or ReLU on the output fc layer.
Note that the hidden dimensions should be smaller than the input/output
dimensions (bottleneck structure). The default implementation using a
ResNet50 backbone has an input dimension of 2048, hidden dimension of 512,
and output dimension of 2048
Args:
in_dims:
Input dimension of the first linear layer.
h_dims:
Hidden dimension of all the fully connected layers (should be a
bottleneck!)
out_dims:
Output Dimension of the final linear layer.
Returns:
nn.Sequential:
The projection head.
"""
l1 = nn.Sequential(
nn.Linear(in_dims, h_dims), nn.BatchNorm1d(h_dims), nn.ReLU(inplace=True)
)
l2 = nn.Linear(h_dims, out_dims)
prediction = nn.Sequential(l1, l2)
return prediction
def _projection_mlp(
num_ftrs: int, h_dims: int, out_dim: int, num_layers: int = 3
) -> nn.Sequential:
"""Projection MLP. The original paper's implementation has 3 layers, with
BN applied to its hidden fc layers but no ReLU on the output fc layer.
The CIFAR-10 study used a MLP with only two layers.
Args:
in_dims:
Input dimension of the first linear layer.
h_dims:
Hidden dimension of all the fully connected layers.
out_dims:
Output Dimension of the final linear layer.
num_layers:
Controls the number of layers; must be 2 or 3. Defaults to 3.
Returns:
nn.Sequential:
The projection head.
"""
l1 = nn.Sequential(
nn.Linear(num_ftrs, h_dims), nn.BatchNorm1d(h_dims), nn.ReLU(inplace=True)
)
l2 = nn.Sequential(
nn.Linear(h_dims, h_dims), nn.BatchNorm1d(h_dims), nn.ReLU(inplace=True)
)
l3 = nn.Sequential(nn.Linear(h_dims, out_dim), nn.BatchNorm1d(out_dim))
if num_layers == 3:
projection = nn.Sequential(l1, l2, l3)
elif num_layers == 2:
projection = nn.Sequential(l1, l3)
else:
raise NotImplementedError("Only MLPs with 2 and 3 layers are implemented.")
return projection
class NNCLR(nn.Module):
    """Implementation of the NNCLR[0] architecture
    Recommended loss: :py:class:`lightly.loss.ntx_ent_loss.NTXentLoss`
    Recommended module: :py:class:`lightly.models.modules.nn_memory_bank.NNmemoryBankModule`
    [0] NNCLR, 2021, https://arxiv.org/abs/2104.14548
    Attributes:
        backbone:
            Backbone model to extract features from images.
        num_ftrs:
            Dimension of the embedding (before the projection head).
        proj_hidden_dim:
            Dimension of the hidden layer of the projection head.
        pred_hidden_dim:
            Dimension of the hidden layer of the prediction head.
        out_dim:
            Dimension of the output (after the projection head).
    Examples:
        >>> model = NNCLR(backbone)
        >>> criterion = NTXentLoss(temperature=0.1)
        >>>
        >>> nn_replacer = NNmemoryBankModule(size=2 ** 16)
        >>>
        >>> # forward pass
        >>> (z0, p0), (z1, p1) = model(x0, x1)
        >>> z0 = nn_replacer(z0.detach(), update=False)
        >>> z1 = nn_replacer(z1.detach(), update=True)
        >>>
        >>> loss = 0.5 * (criterion(z0, p1) + criterion(z1, p0))
    """
    def __init__(
        self,
        backbone: nn.Module,
        num_ftrs: int = 512,
        proj_hidden_dim: int = 2048,
        pred_hidden_dim: int = 4096,
        out_dim: int = 256,
    ):
        super(NNCLR, self).__init__()
        self.backbone = backbone
        self.num_ftrs = num_ftrs
        self.proj_hidden_dim = proj_hidden_dim
        self.pred_hidden_dim = pred_hidden_dim
        self.out_dim = out_dim
        self.projection_mlp = NNCLRProjectionHead(
            num_ftrs,
            proj_hidden_dim,
            out_dim,
        )
        self.prediction_mlp = NNCLRPredictionHead(
            out_dim,
            pred_hidden_dim,
            out_dim,
        )
        warnings.warn(
            Warning(
                "The high-level building block NNCLR will be deprecated in version 1.3.0. "
                + "Use low-level building blocks instead. "
                + "See https://docs.lightly.ai/self-supervised-learning/lightly.models.html for more information"
            ),
            DeprecationWarning,
        )
    def forward(
        self, x0: torch.Tensor, x1: torch.Tensor = None, return_features: bool = False
    ):
        """Embeds and projects the input images.
        Extracts features with the backbone and applies the projection
        head to the output space. If both x0 and x1 are not None, both will be
        passed through the backbone and projection head. If x1 is None, only
        x0 will be forwarded.
        Args:
            x0:
                Tensor of shape bsz x channels x W x H.
            x1:
                Tensor of shape bsz x channels x W x H.
            return_features:
                Whether or not to return the intermediate features backbone(x).
        Returns:
            The output projection of x0 and (if x1 is not None) the output
            projection of x1. If return_features is True, the output for each x
            is a tuple (out, f) where f are the features before the projection
            head.
        Examples:
            >>> # single input, single output
            >>> out = model(x)
            >>>
            >>> # single input with return_features=True
            >>> out, f = model(x, return_features=True)
            >>>
            >>> # two inputs, two outputs
            >>> out0, out1 = model(x0, x1)
            >>>
            >>> # two inputs, two outputs with return_features=True
            >>> (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
        """
        # forward pass of first input x0: projection z and prediction p
        f0 = self.backbone(x0).flatten(start_dim=1)
        z0 = self.projection_mlp(f0)
        p0 = self.prediction_mlp(z0)
        out0 = (z0, p0)
        # append features if requested
        if return_features:
            out0 = (out0, f0)
        # return out0 if x1 is None
        if x1 is None:
            return out0
        # forward pass of second input x1
        f1 = self.backbone(x1).flatten(start_dim=1)
        z1 = self.projection_mlp(f1)
        p1 = self.prediction_mlp(z1)
        out1 = (z1, p1)
        # append features if requested
        if return_features:
            out1 = (out1, f1)
        # return both outputs
        return out0, out1
| 7,095 | 30.122807 | 113 | py |
lightly | lightly-master/lightly/models/resnet.py | """Custom ResNet Implementation
Note that the architecture we present here differs from the one used in
torchvision. We replace the first 7x7 convolution by a 3x3 convolution to make
the model faster and run better on smaller input image resolutions.
Furthermore, we introduce a resnet-9 variant for extra small models. These can
run for example on a microcontroller with 100kBytes of storage.
"""
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from lightly.models.batchnorm import get_norm_layer
class BasicBlock(nn.Module):
    """Basic residual block of the custom ResNet.

    Two 3x3 convolutions, each followed by normalization, plus a skip
    connection. When the spatial resolution or channel count changes, the
    skip connection is a strided 1x1 convolution followed by normalization.

    Attributes:
        in_planes:
            Number of input channels.
        planes:
            Number of output channels.
        stride:
            Stride of the first convolution.
    """

    expansion = 1

    def __init__(
        self, in_planes: int, planes: int, stride: int = 1, num_splits: int = 0
    ):
        super(BasicBlock, self).__init__()
        # Submodules are created in the original order so the parameter
        # registration (and thus state_dict layout) stays unchanged.
        self.conv1 = nn.Conv2d(
            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = get_norm_layer(planes, num_splits)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = get_norm_layer(planes, num_splits)
        # Identity shortcut unless the output shape differs from the input.
        if stride == 1 and in_planes == self.expansion * planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_planes,
                    self.expansion * planes,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                get_norm_layer(self.expansion * planes, num_splits),
            )

    def forward(self, x: torch.Tensor):
        """Applies the residual block.

        Args:
            x:
                Tensor of shape bsz x channels x W x H

        Returns:
            Tensor of shape bsz x channels x W x H
        """
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + residual
        return F.relu(out)
class Bottleneck(nn.Module):
    """Bottleneck residual block of the custom ResNet.

    A 1x1 reduce -> 3x3 -> 1x1 expand convolution stack with a skip
    connection; the skip uses a strided 1x1 projection whenever the output
    shape differs from the input shape.

    Attributes:
        in_planes:
            Number of input channels.
        planes:
            Number of bottleneck channels (the output has planes * expansion).
        stride:
            Stride of the 3x3 convolution.
    """

    expansion = 4

    def __init__(
        self, in_planes: int, planes: int, stride: int = 1, num_splits: int = 0
    ):
        super(Bottleneck, self).__init__()
        # Creation order mirrors the original so the state_dict layout is kept.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = get_norm_layer(planes, num_splits)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn2 = get_norm_layer(planes, num_splits)
        self.conv3 = nn.Conv2d(
            planes, self.expansion * planes, kernel_size=1, bias=False
        )
        self.bn3 = get_norm_layer(self.expansion * planes, num_splits)
        out_planes = self.expansion * planes
        # Identity shortcut unless the output shape differs from the input.
        if stride == 1 and in_planes == out_planes:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(
                    in_planes,
                    out_planes,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                get_norm_layer(out_planes, num_splits),
            )

    def forward(self, x):
        """Applies the bottleneck block.

        Args:
            x:
                Tensor of shape bsz x channels x W x H

        Returns:
            Tensor of shape bsz x channels x W x H
        """
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return F.relu(out + residual)
class ResNet(nn.Module):
    """ResNet implementation.

    [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
    Deep Residual Learning for Image Recognition. arXiv:1512.03385

    Attributes:
        block:
            ResNet building block type.
        layers:
            List of blocks per layer.
        num_classes:
            Number of classes in final softmax layer.
        width:
            Multiplier for ResNet width.
        num_splits:
            Number of splits for SplitBatchNorm; 0 uses standard BatchNorm.
    """

    def __init__(
        self,
        block: nn.Module = BasicBlock,
        # NOTE: the default list is read-only here (never mutated), so the
        # mutable-default pitfall does not apply.
        layers: List[int] = [2, 2, 2, 2],
        num_classes: int = 10,
        width: float = 1.0,
        num_splits: int = 0,
    ):
        super(ResNet, self).__init__()
        self.in_planes = int(64 * width)
        self.base = int(64 * width)
        # 3x3 stem convolution (instead of torchvision's 7x7) to run better
        # on small input resolutions (see module docstring).
        self.conv1 = nn.Conv2d(
            3, self.base, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn1 = get_norm_layer(self.base, num_splits)
        self.layer1 = self._make_layer(
            block, self.base, layers[0], stride=1, num_splits=num_splits
        )
        self.layer2 = self._make_layer(
            block, self.base * 2, layers[1], stride=2, num_splits=num_splits
        )
        self.layer3 = self._make_layer(
            block, self.base * 4, layers[2], stride=2, num_splits=num_splits
        )
        self.layer4 = self._make_layer(
            block, self.base * 8, layers[3], stride=2, num_splits=num_splits
        )
        self.linear = nn.Linear(self.base * 8 * block.expansion, num_classes)

    def _make_layer(self, block, planes, layers, stride, num_splits):
        """Stacks `layers` blocks; only the first block uses `stride`.

        Fix: the parameter `layers` was previously rebound to a list inside
        the function body, shadowing the argument. A distinct local name is
        used now; behavior is unchanged.
        """
        strides = [stride] + [1] * (layers - 1)
        blocks = []
        for block_stride in strides:
            blocks.append(block(self.in_planes, planes, block_stride, num_splits))
            # Each block may expand the channel count for the next one.
            self.in_planes = planes * block.expansion
        return nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        """Forward pass through ResNet.

        Args:
            x:
                Tensor of shape bsz x channels x W x H

        Returns:
            Output tensor of shape bsz x num_classes
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # NOTE(review): the fixed 4x4 average pool assumes a 4x4 feature map
        # at this point (i.e. 32x32 inputs, CIFAR-sized) — confirm for other
        # input resolutions.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ResNetGenerator(
    name: str = "resnet-18",
    width: float = 1,
    num_classes: int = 10,
    num_splits: int = 0,
):
    """Builds and returns the specified ResNet.

    Args:
        name:
            ResNet version from resnet-{9, 18, 34, 50, 101, 152}.
        width:
            ResNet width.
        num_classes:
            Output dim of the last layer.
        num_splits:
            Number of splits to use for SplitBatchNorm (for MoCo model).
            Increase this number to simulate multi-gpu behavior.
            E.g. `num_splits=8` simulates a 8-GPU cluster.
            `num_splits=0` uses normal PyTorch BatchNorm.

    Returns:
        ResNet as nn.Module.

    Raises:
        ValueError: If `name` is not a supported ResNet variant.

    Examples:
        >>> # binary classifier with ResNet-34
        >>> from lightly.models import ResNetGenerator
        >>> resnet = ResNetGenerator('resnet-34', num_classes=2)
    """
    model_params = {
        "resnet-9": {"block": BasicBlock, "layers": [1, 1, 1, 1]},
        "resnet-18": {"block": BasicBlock, "layers": [2, 2, 2, 2]},
        "resnet-34": {"block": BasicBlock, "layers": [3, 4, 6, 3]},
        "resnet-50": {"block": Bottleneck, "layers": [3, 4, 6, 3]},
        "resnet-101": {"block": Bottleneck, "layers": [3, 4, 23, 3]},
        "resnet-152": {"block": Bottleneck, "layers": [3, 8, 36, 3]},
    }
    if name not in model_params:
        # Fix: the previous message used a backslash line continuation whose
        # leading indentation (and stray "{%s}" braces) leaked into the
        # user-facing error text.
        raise ValueError(
            f"Illegal name: {name}. Try resnet-9, resnet-18, resnet-34, "
            "resnet-50, resnet-101, resnet-152."
        )
    return ResNet(
        **model_params[name],
        width=width,
        num_classes=num_classes,
        num_splits=num_splits,
    )
| 8,383 | 27.517007 | 82 | py |
lightly | lightly-master/lightly/models/simclr.py | """ SimCLR Model """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import warnings
import torch
import torch.nn as nn
from lightly.models.modules import SimCLRProjectionHead
class SimCLR(nn.Module):
    """Implementation of the SimCLR[0] architecture

    Recommended loss: :py:class:`lightly.loss.ntx_ent_loss.NTXentLoss`

    [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709

    Attributes:
        backbone:
            Backbone model to extract features from images.
        num_ftrs:
            Dimension of the embedding (before the projection head).
        out_dim:
            Dimension of the output (after the projection head).
    """

    def __init__(self, backbone: nn.Module, num_ftrs: int = 32, out_dim: int = 128):
        super(SimCLR, self).__init__()
        self.backbone = backbone
        self.projection_head = SimCLRProjectionHead(
            num_ftrs, num_ftrs, out_dim, batch_norm=False
        )
        warnings.warn(
            Warning(
                "The high-level building block SimCLR will be deprecated in version 1.3.0. "
                + "Use low-level building blocks instead. "
                + "See https://docs.lightly.ai/self-supervised-learning/lightly.models.html for more information"
            ),
            DeprecationWarning,
        )

    def _project(self, x: torch.Tensor, return_features: bool):
        """Runs backbone + projection head on a single view.

        Returns the projection, or a (projection, features) tuple when
        return_features is True.
        """
        features = self.backbone(x).flatten(start_dim=1)
        projection = self.projection_head(features)
        if return_features:
            return projection, features
        return projection

    def forward(
        self, x0: torch.Tensor, x1: torch.Tensor = None, return_features: bool = False
    ):
        """Embeds and projects the input images.

        Extracts features with the backbone and applies the projection head.
        When x1 is None only x0 is forwarded; otherwise both views are
        processed and both outputs are returned.

        Args:
            x0:
                Tensor of shape bsz x channels x W x H.
            x1:
                Tensor of shape bsz x channels x W x H.
            return_features:
                Whether or not to return the intermediate features backbone(x).

        Returns:
            The output projection of x0 and (if x1 is not None) the output
            projection of x1. If return_features is True, each output is a
            tuple (out, f) where f are the features before the projection head.

        Examples:
            >>> # single input, single output
            >>> out = model(x)
            >>>
            >>> # two inputs, two outputs with return_features=True
            >>> (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
        """
        out0 = self._project(x0, return_features)
        if x1 is None:
            return out0
        out1 = self._project(x1, return_features)
        return out0, out1
| 3,399 | 30.192661 | 113 | py |
lightly | lightly-master/lightly/models/simsiam.py | """ SimSiam Model """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import warnings
import torch
import torch.nn as nn
from lightly.models.modules import SimSiamPredictionHead, SimSiamProjectionHead
class SimSiam(nn.Module):
    """Implementation of SimSiam[0] network
    Recommended loss: :py:class:`lightly.loss.sym_neg_cos_sim_loss.SymNegCosineSimilarityLoss`
    [0] SimSiam, 2020, https://arxiv.org/abs/2011.10566
    Attributes:
        backbone:
            Backbone model to extract features from images.
        num_ftrs:
            Dimension of the embedding (before the projection head).
        proj_hidden_dim:
            Dimension of the hidden layer of the projection head. This should
            be the same size as `num_ftrs`.
        pred_hidden_dim:
            Dimension of the hidden layer of the predicion head. This should
            be `num_ftrs` / 4.
        out_dim:
            Dimension of the output (after the projection head).
    """
    def __init__(
        self,
        backbone: nn.Module,
        num_ftrs: int = 2048,
        proj_hidden_dim: int = 2048,
        pred_hidden_dim: int = 512,
        out_dim: int = 2048,
    ):
        super(SimSiam, self).__init__()
        self.backbone = backbone
        self.num_ftrs = num_ftrs
        self.proj_hidden_dim = proj_hidden_dim
        self.pred_hidden_dim = pred_hidden_dim
        self.out_dim = out_dim
        # Projection head: backbone features -> representation z.
        self.projection_mlp = SimSiamProjectionHead(
            num_ftrs,
            proj_hidden_dim,
            out_dim,
        )
        # Prediction head: representation z -> prediction p.
        self.prediction_mlp = SimSiamPredictionHead(
            out_dim,
            pred_hidden_dim,
            out_dim,
        )
        warnings.warn(
            Warning(
                "The high-level building block SimSiam will be deprecated in version 1.3.0. "
                + "Use low-level building blocks instead. "
                + "See https://docs.lightly.ai/self-supervised-learning/lightly.models.html for more information"
            ),
            DeprecationWarning,
        )
    def forward(
        self, x0: torch.Tensor, x1: torch.Tensor = None, return_features: bool = False
    ):
        """Forward pass through SimSiam.
        Extracts features with the backbone and applies the projection
        head and prediction head to the output space. If both x0 and x1 are not
        None, both will be passed through the backbone, projection, and
        prediction head. If x1 is None, only x0 will be forwarded.
        Args:
            x0:
                Tensor of shape bsz x channels x W x H.
            x1:
                Tensor of shape bsz x channels x W x H.
            return_features:
                Whether or not to return the intermediate features backbone(x).
        Returns:
            The output prediction and projection of x0 and (if x1 is not None)
            the output prediction and projection of x1. If return_features is
            True, the output for each x is a tuple (out, f) where f are the
            features before the projection head.
        Examples:
            >>> # single input, single output
            >>> out = model(x)
            >>>
            >>> # single input with return_features=True
            >>> out, f = model(x, return_features=True)
            >>>
            >>> # two inputs, two outputs
            >>> out0, out1 = model(x0, x1)
            >>>
            >>> # two inputs, two outputs with return_features=True
            >>> (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
        """
        # NOTE(review): x1 defaults to None, so Optional[torch.Tensor] would be
        # the more precise annotation; left unchanged here.
        f0 = self.backbone(x0).flatten(start_dim=1)
        z0 = self.projection_mlp(f0)
        p0 = self.prediction_mlp(z0)
        out0 = (z0, p0)
        # append features if requested
        if return_features:
            out0 = (out0, f0)
        if x1 is None:
            return out0
        f1 = self.backbone(x1).flatten(start_dim=1)
        z1 = self.projection_mlp(f1)
        p1 = self.prediction_mlp(z1)
        out1 = (z1, p1)
        # append features if requested
        if return_features:
            out1 = (out1, f1)
        return out0, out1
| 4,169 | 29.888889 | 113 | py |
lightly | lightly-master/lightly/models/utils.py | """ Utils for working with SSL models """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
import math
import warnings
from typing import Iterable, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from numpy.typing import NDArray
from torch.nn import Module
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.parameter import Parameter
@torch.no_grad()
def batch_shuffle(
    batch: torch.Tensor, distributed: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Randomly permutes the samples in the batch.

    Args:
        batch:
            The batch to shuffle (first dimension is the batch dimension).
        distributed:
            If True the shuffle is performed across multiple gpus.

    Returns:
        A (shuffled_batch, shuffle) tuple where `shuffle` is the permutation
        that was applied; pass it to `batch_unshuffle` to restore the order.

    Examples:
        >>> x1_shuffled, shuffle = batch_shuffle(x1)
        >>> out1 = projection_head_momentum(moco_momentum(x1_shuffled))
        >>> out1 = batch_unshuffle(out1, shuffle)
    """
    if distributed:
        return batch_shuffle_distributed(batch)
    permutation = torch.randperm(batch.shape[0], device=batch.device)
    return batch[permutation], permutation
@torch.no_grad()
def batch_unshuffle(
    batch: torch.Tensor,
    shuffle: torch.Tensor,
    distributed: bool = False,
) -> torch.Tensor:
    """Restores the original sample order after `batch_shuffle`.

    Args:
        batch:
            The shuffled batch.
        shuffle:
            The permutation returned by `batch_shuffle`.
        distributed:
            If True the batch is unshuffled across multiple gpus.

    Returns:
        The batch in its original order.
    """
    if distributed:
        return batch_unshuffle_distributed(batch, shuffle)
    inverse_permutation = torch.argsort(shuffle)
    return batch[inverse_permutation]
@torch.no_grad()
def concat_all_gather(x: torch.Tensor) -> torch.Tensor:
    """Gathers x from every process and concatenates along dimension 0.

    Requires an initialized torch.distributed process group. This code was
    taken and adapted from https://github.com/facebookresearch/moco.
    """
    gathered = [torch.empty_like(x) for _ in range(dist.get_world_size())]
    dist.all_gather(gathered, x, async_op=False)
    return torch.cat(gathered, dim=0)
@torch.no_grad()
def batch_shuffle_distributed(batch: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """Shuffles batch over multiple gpus.
    This code was taken and adapted from here:
    https://github.com/facebookresearch/moco.
    Requires an initialized torch.distributed process group; every rank must
    call this function collectively.
    Args:
        batch:
            The tensor to shuffle.
    Returns:
        A (batch, shuffle) tuple where batch is the shuffled version of the
        input batch and shuffle is an index to restore the original order.
    """
    # gather from all gpus
    batch_size_this = batch.shape[0]
    batch_gather = concat_all_gather(batch)
    batch_size_all = batch_gather.shape[0]
    num_gpus = batch_size_all // batch_size_this
    # random shuffle index
    idx_shuffle = torch.randperm(batch_size_all).cuda()
    # broadcast to all gpus
    # (rank 0's permutation wins so every process uses the same shuffle)
    dist.broadcast(idx_shuffle, src=0)
    # index for restoring
    shuffle = torch.argsort(idx_shuffle)
    # shuffled index for this gpu
    gpu_idx = dist.get_rank()
    idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
    return batch_gather[idx_this], shuffle
@torch.no_grad()
def batch_unshuffle_distributed(
    batch: torch.Tensor, shuffle: torch.Tensor
) -> torch.Tensor:
    """Undo batch shuffle over multiple gpus.
    This code was taken and adapted from here:
    https://github.com/facebookresearch/moco.
    Requires an initialized torch.distributed process group; every rank must
    call this function collectively.
    Args:
        batch:
            The tensor to unshuffle.
        shuffle:
            Index to restore the original tensor.
    Returns:
        The unshuffled tensor.
    """
    # gather from all gpus
    batch_size_this = batch.shape[0]
    batch_gather = concat_all_gather(batch)
    batch_size_all = batch_gather.shape[0]
    num_gpus = batch_size_all // batch_size_this
    # restored index for this gpu
    # (each rank keeps only its own slice of the globally restored order)
    gpu_idx = dist.get_rank()
    idx_this = shuffle.view(num_gpus, -1)[gpu_idx]
    return batch_gather[idx_this]
def deactivate_requires_grad(model: nn.Module):
    """Turns off gradient computation for every parameter of `model`.

    This has the same effect as permanently executing the model within a
    `torch.no_grad()` context; use it to freeze a model. Undo with
    `activate_requires_grad(...)`.

    Examples:
        >>> backbone = resnet18()
        >>> deactivate_requires_grad(backbone)
    """
    for parameter in model.parameters():
        parameter.requires_grad_(False)
def activate_requires_grad(model: nn.Module):
    """Turns gradient computation back on for every parameter of `model`.

    The counterpart to `deactivate_requires_grad(...)`.

    Examples:
        >>> backbone = resnet18()
        >>> activate_requires_grad(backbone)
    """
    for parameter in model.parameters():
        parameter.requires_grad_(True)
@torch.no_grad()
def update_momentum(model: nn.Module, model_ema: nn.Module, m: float):
    """Updates parameters of `model_ema` with Exponential Moving Average of `model`

    Momentum encoders are a crucial component for models such as MoCo or BYOL.
    After the update, each parameter of `model_ema` holds
    `m * param_ema + (1 - m) * param`.

    Args:
        model:
            The model whose parameters are the EMA targets.
        model_ema:
            The momentum (EMA) model; updated in place.
        m:
            Momentum factor in [0, 1]; larger values change `model_ema` slower.

    Examples:
        >>> backbone = resnet18()
        >>> projection_head = MoCoProjectionHead()
        >>> backbone_momentum = copy.deepcopy(backbone)
        >>> projection_head_momentum = copy.deepcopy(projection_head)
        >>>
        >>> # update momentum
        >>> update_momentum(backbone, backbone_momentum, m=0.999)
        >>> update_momentum(projection_head, projection_head_momentum, m=0.999)
    """
    # Fix: the loop previously rebound the names `model` and `model_ema`,
    # shadowing the function arguments. Distinct loop names keep the code
    # readable; the numerical behavior is unchanged.
    for param_ema, param in zip(model_ema.parameters(), model.parameters()):
        param_ema.data = param_ema.data * m + param.data * (1.0 - m)
@torch.no_grad()
def normalize_weight(weight: nn.Parameter, dim: int = 1, keepdim: bool = True):
    """Scales `weight` in place to unit norm along dimension `dim`."""
    norm = torch.norm(weight, dim=dim, keepdim=keepdim)
    weight.div_(norm)
# copy paste from PyTorch master branch as it is not available in older releases
# source: https://github.com/pytorch/pytorch/blob/20ac7362009dd8e0aca6e72fc9357773136a83b8/torch/nn/init.py#L22-L54
def _no_grad_trunc_normal(
    tensor: torch.Tensor,
    mean: float,
    std: float,
    a: float,
    b: float,
) -> torch.Tensor:
    """Initializes the input tensor with a truncated normal distribution.
    This method is based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    Args:
        tensor:
            The tensor to initialize.
        mean:
            Mean of the distribution.
        std:
            Standard deviation of the distribution.
        a:
            Minimum value of the distribution, values below will be clamped.
        b:
            Maximum value of the distribution, values above will be clamped.
    Returns:
        The input tensor, filled in place.
    """
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
            "The distribution of values may be incorrect.",
            stacklevel=2,
        )
    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)
        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1)
        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()
        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.0))
        tensor.add_(mean)
        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
    return tensor
def repeat_token(token: torch.Tensor, size: Tuple[int, int]) -> torch.Tensor:
    """Tiles a single token into a full (batch, sequence) grid.

    Args:
        token:
            Token tensor with shape (1, 1, dim).
        size:
            (batch_size, sequence_length) tuple.

    Returns:
        Tensor with shape (batch_size, sequence_length, dim) containing copies
        of the input token.
    """
    num_batches, num_tokens = size
    return token.repeat(num_batches, num_tokens, 1)
def expand_index_like(index: torch.Tensor, tokens: torch.Tensor) -> torch.Tensor:
    """Broadcasts an index over the feature dimension of `tokens`.

    Args:
        index:
            Index tensor with shape (batch_size, idx_length) where each entry
            is an index in [0, sequence_length).
        tokens:
            Tokens tensor with shape (batch_size, sequence_length, dim).

    Returns:
        Index tensor with shape (batch_size, idx_length, dim) where the
        original indices are repeated dim times along the last dimension.
    """
    feature_dim = tokens.shape[-1]
    return index.unsqueeze(-1).expand(-1, -1, feature_dim)
def get_at_index(tokens: torch.Tensor, index: torch.Tensor) -> torch.Tensor:
    """Selects the tokens at the given sequence positions.

    Args:
        tokens:
            Token tensor with shape (batch_size, sequence_length, dim).
        index:
            Index tensor with shape (batch_size, index_length) where each
            entry is an index in [0, sequence_length).

    Returns:
        Token tensor with shape (batch_size, index_length, dim) containing
        the selected tokens.
    """
    expanded_index = expand_index_like(index, tokens)
    return torch.gather(tokens, 1, expanded_index)
def set_at_index(
    tokens: torch.Tensor, index: torch.Tensor, value: torch.Tensor
) -> torch.Tensor:
    """Writes `value` into `tokens` at the given sequence positions.

    Args:
        tokens:
            Tokens tensor with shape (batch_size, sequence_length, dim).
        index:
            Index tensor with shape (batch_size, index_length).
        value:
            Value tensor with shape (batch_size, index_length, dim).

    Returns:
        A new tokens tensor with shape (batch_size, sequence_length, dim)
        containing the written values; the input is not modified.
    """
    expanded_index = expand_index_like(index, tokens)
    return torch.scatter(tokens, 1, expanded_index, value)
def mask_at_index(
    tokens: torch.Tensor, index: torch.Tensor, mask_token: torch.Tensor
) -> torch.Tensor:
    """Replaces the tokens at the given positions with the mask token.

    Args:
        tokens:
            Tokens tensor with shape (batch_size, sequence_length, dim).
        index:
            Index tensor with shape (batch_size, index_length).
        mask_token:
            Value tensor with shape (1, 1, dim).

    Returns:
        A new tokens tensor with shape (batch_size, sequence_length, dim)
        where the indexed positions hold the mask token.
    """
    # Build a 0/1 selection mask and blend between original and mask token.
    selection = tokens.new_zeros(tokens.shape)
    selection = set_at_index(selection, index, 1)
    return (1 - selection) * tokens + selection * mask_token
def prepend_class_token(
    tokens: torch.Tensor, class_token: torch.Tensor
) -> torch.Tensor:
    """Inserts the class token at position 0 of every sequence.

    Args:
        tokens:
            Tokens tensor with shape (batch_size, sequence_length, dim).
        class_token:
            Class token with shape (1, 1, dim).

    Returns:
        Tokens tensor with shape (batch_size, sequence_length + 1, dim) whose
        first token in every sequence is the class token.
    """
    num_batches = tokens.shape[0]
    expanded_class_token = class_token.expand(num_batches, -1, -1)
    return torch.cat([expanded_class_token, tokens], dim=1)
def patchify(images: torch.Tensor, patch_size: int) -> torch.Tensor:
    """Converts a batch of square input images into flattened patches.

    Args:
        images:
            Images tensor with shape (batch_size, channels, height, width).
            Height and width must be equal and divisible by patch_size.
        patch_size:
            Patch size in pixels.

    Returns:
        Patches tensor with shape (batch_size, num_patches,
        channels * patch_size ** 2) where
        num_patches = (height / patch_size) * (width / patch_size).
    """
    batch_size, channels, height, width = images.shape
    assert height == width and height % patch_size == 0
    patches_per_side = height // patch_size
    num_patches = patches_per_side * patches_per_side
    # (N, C, h, p, w, q) -> (N, h, w, p, q, C): patches in row-major order,
    # each flattened pixel-rows-first then channels (same layout as before).
    grid = images.reshape(
        batch_size, channels, patches_per_side, patch_size, patches_per_side, patch_size
    )
    grid = grid.permute(0, 2, 4, 3, 5, 1)
    return grid.reshape(batch_size, num_patches, patch_size**2 * channels)
def random_token_mask(
    size: Tuple[int, int],
    mask_ratio: float = 0.6,
    mask_class_token: bool = False,
    device: Optional[Union[torch.device, str]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Creates random token masks.

    Fix: the return annotation previously claimed a single torch.Tensor, but
    the function returns an (index_keep, index_mask) tuple.

    Args:
        size:
            Size of the token batch for which to generate masks.
            Should be (batch_size, sequence_length).
        mask_ratio:
            Percentage of tokens to mask.
        mask_class_token:
            If False the class token (position 0) is never masked. If True the
            class token might be masked.
        device:
            Device on which to create the index masks.

    Returns:
        A (index_keep, index_mask) tuple where each index is a tensor.
        index_keep contains the indices of the unmasked tokens and has shape
        (batch_size, num_keep). index_mask contains the indices of the masked
        tokens and has shape (batch_size, sequence_length - num_keep).
        num_keep is equal to sequence_length * (1 - mask_ratio).
    """
    batch_size, sequence_length = size
    num_keep = int(sequence_length * (1 - mask_ratio))
    noise = torch.rand(batch_size, sequence_length, device=device)
    if not mask_class_token and sequence_length > 0:
        # make sure that class token is not masked: the smallest noise value
        # sorts first, so position 0 always lands in idx_keep
        noise[:, 0] = -1
        num_keep = max(1, num_keep)
    # get indices of tokens to keep (lowest noise values are kept)
    indices = torch.argsort(noise, dim=1)
    idx_keep = indices[:, :num_keep]
    idx_mask = indices[:, num_keep:]
    return idx_keep, idx_mask
def nearest_neighbors(
    input_maps: torch.Tensor,
    candidate_maps: torch.Tensor,
    distances: torch.Tensor,
    num_matches: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Finds the nearest neighbors of the maps in input_maps in candidate_maps.
    Args:
        input_maps:
            A tensor of maps for which to find nearest neighbors.
            It has size: [batch_size, input_map_size, feature_dimension]
        candidate_maps:
            A tensor of maps to search for nearest neighbors.
            It has size: [batch_size, candidate_map_size, feature_dimension]
        distances:
            A tensor of distances between the maps in input_maps and candidate_maps.
            It has size: [batch_size, input_map_size, candidate_map_size]
        num_matches:
            Number of nearest neighbors to return. If num_matches is None or -1,
            all the maps in candidate_maps are considered.
    Returns:
        A tuple of tensors, containing the nearest neighbors in input_maps and candidate_maps.
        They both have size: [batch_size, input_map_size, feature_dimension]
    """
    # Cap num_matches at the number of input maps so the topk below is valid.
    if num_matches is None or num_matches == -1 or num_matches > input_maps.size(1):
        num_matches = input_maps.size(1)
    # Find nearest neighbour of each input element in the candidate map
    topk_values, topk_indices = distances.topk(
        k=1, dim=2, largest=False
    )  # [bsz, input_map_size, 1]
    topk_values = topk_values.squeeze(-1)  # [bsz, input_map_size]
    # Select num_matches neighbors pairs having the lowest distance value.
    _, min_indices = topk_values.topk(
        k=num_matches, dim=1, largest=False
    )  # [bsz, num_matches]
    # Create the filtered input map with num_matches lowest distance values.
    feature_dimension = input_maps.shape[2]
    filtered_input_maps = torch.gather(
        input_maps, 1, min_indices.unsqueeze(-1).expand(-1, -1, feature_dimension)
    )  # [bsz, num_matches, feature_dimension]
    # Create candidate maps in the same way as input maps, but using corrispondent candidate values
    selected_candidate_maps = torch.gather(
        candidate_maps, 1, topk_indices.expand(-1, -1, feature_dimension)
    )  # [bsz, input_map_size, feature_dimension]
    filtered_candidate_maps = torch.gather(
        selected_candidate_maps,
        1,
        min_indices.unsqueeze(-1).expand(-1, -1, feature_dimension),
    )  # [bsz, num_matches, feature_dimension]
    return filtered_input_maps, filtered_candidate_maps
def get_weight_decay_parameters(
    modules: Iterable[Module],
    decay_batch_norm: bool = False,
    decay_bias: bool = False,
) -> Tuple[List[Parameter], List[Parameter]]:
    """Returns all parameters of the modules that should be decayed and not decayed.
    Args:
        modules:
            List of modules to get the parameters from.
        decay_batch_norm:
            If True, batch norm parameters are decayed; otherwise they go into
            the no-weight-decay group.
        decay_bias:
            If True, bias parameters are decayed; otherwise they go into the
            no-weight-decay group.
    Returns:
        (params, params_no_weight_decay) tuple.
    """
    params = []
    params_no_weight_decay = []
    for module in modules:
        for mod in module.modules():
            if isinstance(mod, _BatchNorm):
                if not decay_batch_norm:
                    params_no_weight_decay.extend(mod.parameters(recurse=False))
                else:
                    params.extend(mod.parameters(recurse=False))
            else:
                # recurse=False: each submodule is visited by module.modules(),
                # so only the parameters owned directly by `mod` are taken here.
                for name, param in mod.named_parameters(recurse=False):
                    if not decay_bias and name.endswith("bias"):
                        params_no_weight_decay.append(param)
                    else:
                        params.append(param)
    return params, params_no_weight_decay
def trunc_normal_(
    tensor: torch.Tensor,
    mean: float = 0.0,
    std: float = 1.0,
    a: float = -2.0,
    b: float = 2.0,
) -> torch.Tensor:
    """Fills `tensor` in place with values from a normal distribution truncated to [a, b]."""
    return _no_grad_trunc_normal(tensor, mean, std, a, b)
def apply_masks(x, masks):
    """Gathers the patches selected by each mask and concatenates the results.

    :param x: tensor of shape [B (batch-size), N (num-patches), D (feature-dim)]
    :param masks: list of tensors containing indices of patches in [N] to keep
    """
    feature_dim = x.size(-1)
    gathered = []
    for mask in masks:
        keep_index = mask.unsqueeze(-1).repeat(1, 1, feature_dim)
        gathered.append(torch.gather(x, dim=1, index=keep_index))
    # One batch-sized chunk per mask, stacked along the batch dimension.
    return torch.cat(gathered, dim=0)
def repeat_interleave_batch(x, B, repeat):
    """Repeats each consecutive chunk of B samples `repeat` times.

    E.g. with B=2 and repeat=2, [a, b, c, d] becomes [a, b, a, b, c, d, c, d].
    """
    num_chunks = len(x) // B
    repeated_chunks = []
    for chunk_idx in range(num_chunks):
        chunk = x[chunk_idx * B : (chunk_idx + 1) * B]
        repeated_chunks.append(torch.cat([chunk] * repeat, dim=0))
    return torch.cat(repeated_chunks, dim=0)
def get_2d_sincos_pos_embed(
    embed_dim: int, grid_size: int, cls_token: bool = False
) -> NDArray[np.float_]:
    """Returns 2D sin-cos embeddings. Code from [0].
    - [0]: https://github.com/facebookresearch/ijepa
    Args:
        embed_dim:
            Embedding dimension.
        grid_size:
            Grid height and width. Should usually be set to sqrt(sequence length).
        cls_token:
            If True, a positional embedding for the class token is prepended to the returned
            embeddings.
    Returns:
        Positional embeddings array with size (grid_size * grid_size, embed_dim) if cls_token is False.
        If cls_token is True, a (1 + grid_size * grid_size, embed_dim) array is returned.
    """
    grid_h = np.arange(grid_size, dtype=float)
    grid_w = np.arange(grid_size, dtype=float)
    grid = np.meshgrid(grid_w, grid_h)  # here w goes first
    # final grid shape: (2, 1, grid_size, grid_size)
    grid = np.stack(grid, axis=0)
    grid = grid.reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        # class token gets an all-zero positional embedding at index 0
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed
def get_2d_sincos_pos_embed_from_grid(
    embed_dim: int, grid: NDArray[np.int_]
) -> NDArray[np.float_]:
    """Returns 2D sin-cos embeddings grid. Code from [0].
    - [0]: https://github.com/facebookresearch/ijepa
    Args:
        embed_dim:
            Embedding dimension. Must be even (half for each grid axis).
        grid:
            2-dimensional grid to embed; grid[0] and grid[1] hold the
            coordinates for the two axes.
    Returns:
        Positional embeddings array with size (grid_size * grid_size, embed_dim).
    """
    assert embed_dim % 2 == 0
    # use half of dimensions to encode grid_h
    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)
    emb = np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
    return emb
def get_1d_sincos_pos_embed(
    embed_dim: int, grid_size: int, cls_token: bool = False
) -> NDArray[np.float64]:
    """Returns 1D sin-cos embeddings. Code from [0].

    - [0]: https://github.com/facebookresearch/ijepa

    Args:
        embed_dim:
            Embedding dimension.
        grid_size:
            Grid height and width. Should usually be set to sqrt(sequence length).
        cls_token:
            If True, a positional embedding for the class token is prepended to the returned
            embeddings.

    Returns:
        Positional embeddings array with size (grid_size, embed_dim) if cls_token is False.
        If cls_token is True, a (1 + grid_size, embed_dim) array is returned.
    """
    # Note: np.float64 replaces np.float_, which was removed in NumPy 2.0.
    grid = np.arange(grid_size, dtype=float)
    pos_embed = get_1d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token:
        # Prepend an all-zero embedding for the class token.
        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
    return pos_embed
def get_1d_sincos_pos_embed_from_grid(
    embed_dim: int, pos: NDArray[np.int_]
) -> NDArray[np.float64]:
    """Returns 1D sin-cos embeddings grid. Code from [0].

    - [0]: https://github.com/facebookresearch/ijepa

    Args:
        embed_dim:
            Embedding dimension. Must be even.
        pos:
            1-dimensional grid to embed.

    Returns:
        Positional embeddings array with size (grid_size, embed_dim).
    """
    # Note: np.float64 replaces np.float_, which was removed in NumPy 2.0.
    assert embed_dim % 2 == 0
    # Geometric progression of frequencies, as in the transformer sinusoidal
    # embedding: omega_d = 1 / 10000^(2d / embed_dim).
    omega = np.arange(embed_dim // 2, dtype=float)
    omega /= embed_dim / 2.0
    omega = 1.0 / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum("m,d->md", pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb
| 23,040 | 31.135286 | 115 | py |
lightly | lightly-master/lightly/models/zoo.py | """ Lightly Model Zoo """
# Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
# Mapping from model identifier to pretrained checkpoint URL. Keys follow the
# pattern "<architecture>/<method>/d<num_ftrs>/w<width>".
ZOO = {
    "resnet-9/simclr/d16/w0.0625": "https://storage.googleapis.com/models_boris/whattolabel-resnet9-simclr-d16-w0.0625-i-ce0d6bd9.pth",
    "resnet-9/simclr/d16/w0.125": "https://storage.googleapis.com/models_boris/whattolabel-resnet9-simclr-d16-w0.125-i-7269c38d.pth",
    "resnet-18/simclr/d16/w1.0": "https://storage.googleapis.com/models_boris/whattolabel-resnet18-simclr-d16-w1.0-i-58852cb9.pth",
    "resnet-18/simclr/d32/w1.0": "https://storage.googleapis.com/models_boris/whattolabel-resnet18-simclr-d32-w1.0-i-085d0693.pth",
    "resnet-34/simclr/d16/w1.0": "https://storage.googleapis.com/models_boris/whattolabel-resnet34-simclr-d16-w1.0-i-6e80d963.pth",
    "resnet-34/simclr/d32/w1.0": "https://storage.googleapis.com/models_boris/whattolabel-resnet34-simclr-d32-w1.0-i-9f185b45.pth",
}
def checkpoints():
    """Returns the Lightly model zoo as a list of checkpoints.

    Checkpoints:
        ResNet-9:
            SimCLR with width = 0.0625 and num_ftrs = 16
        ResNet-9:
            SimCLR with width = 0.125 and num_ftrs = 16
        ResNet-18:
            SimCLR with width = 1.0 and num_ftrs = 16
        ResNet-18:
            SimCLR with width = 1.0 and num_ftrs = 32
        ResNet-34:
            SimCLR with width = 1.0 and num_ftrs = 16
        ResNet-34:
            SimCLR with width = 1.0 and num_ftrs = 32

    Returns:
        A list of available checkpoints as URLs.
    """
    # Only the URLs are needed; no reason to unpack the keys.
    return list(ZOO.values())
| 1,573 | 40.421053 | 135 | py |
lightly | lightly-master/lightly/models/modules/__init__.py | """The lightly.models.modules package provides reusable modules.
This package contains reusable modules such as the NNmemoryBankModule which
can be combined with any lightly model.
"""
# Copyright (c) 2021. Lightly AG and its affiliates.
# All Rights Reserved
from lightly import _torchvision_vit_available
from lightly.models.modules.heads import (
BarlowTwinsProjectionHead,
BYOLPredictionHead,
BYOLProjectionHead,
DINOProjectionHead,
MoCoProjectionHead,
NNCLRPredictionHead,
NNCLRProjectionHead,
SimCLRProjectionHead,
SimSiamPredictionHead,
SimSiamProjectionHead,
SMoGPredictionHead,
SMoGProjectionHead,
SMoGPrototypes,
SwaVProjectionHead,
SwaVPrototypes,
)
from lightly.models.modules.nn_memory_bank import NNMemoryBankModule
if _torchvision_vit_available:
# Requires torchvision >=0.12
from lightly.models.modules.masked_autoencoder import (
MAEBackbone,
MAEDecoder,
MAEEncoder,
)
| 987 | 25 | 75 | py |
lightly | lightly-master/lightly/models/modules/heads.py | """ Projection and Prediction Heads for Self-supervised Learning """
# Copyright (c) 2021. Lightly AG and its affiliates.
# All Rights Reserved
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
from lightly.models import utils
class ProjectionHead(nn.Module):
    """Base class for all projection and prediction heads.

    Args:
        blocks:
            List of tuples, each denoting one block of the projection head MLP.
            Each tuple reads (in_features, out_features, batch_norm_layer,
            non_linearity_layer).

    Examples:
        >>> # the following projection head has two blocks
        >>> # the first block uses batch norm an a ReLU non-linearity
        >>> # the second block is a simple linear layer
        >>> projection_head = ProjectionHead([
        >>>     (256, 256, nn.BatchNorm1d(256), nn.ReLU()),
        >>>     (256, 128, None, None)
        >>> ])
    """

    def __init__(
        self, blocks: List[Tuple[int, int, Optional[nn.Module], Optional[nn.Module]]]
    ):
        super(ProjectionHead, self).__init__()
        modules: List[nn.Module] = []
        for in_features, out_features, norm, activation in blocks:
            # The bias is redundant when a batch norm directly follows the
            # linear layer, so it is dropped in that case.
            modules.append(nn.Linear(in_features, out_features, bias=not bool(norm)))
            if norm:
                modules.append(norm)
            if activation:
                modules.append(activation)
        self.layers = nn.Sequential(*modules)

    def forward(self, x: torch.Tensor):
        """Computes one forward pass through the projection head.

        Args:
            x:
                Input of shape bsz x num_ftrs.
        """
        return self.layers(x)
class BarlowTwinsProjectionHead(ProjectionHead):
    """Projection head used for Barlow Twins.

    "The projector network has three linear layers, each with 8192 output
    units. The first two layers of the projector are followed by a batch
    normalization layer and rectified linear units." [0]

    [0]: 2021, Barlow Twins, https://arxiv.org/abs/2103.03230

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 2048, hidden_dim: int = 8192, output_dim: int = 8192
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, output_dim, None, None),
            ]
        )
class BYOLProjectionHead(ProjectionHead):
    """Projection head used for BYOL.

    "This MLP consists in a linear layer with output size 4096 followed by
    batch normalization, rectified linear units (ReLU), and a final
    linear layer with output dimension 256." [0]

    [0]: BYOL, 2020, https://arxiv.org/abs/2006.07733

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 2048, hidden_dim: int = 4096, output_dim: int = 256
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, output_dim, None, None),
            ]
        )
class BYOLPredictionHead(ProjectionHead):
    """Prediction head used for BYOL.

    "This MLP consists in a linear layer with output size 4096 followed by
    batch normalization, rectified linear units (ReLU), and a final
    linear layer with output dimension 256." [0]

    [0]: BYOL, 2020, https://arxiv.org/abs/2006.07733

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 256, hidden_dim: int = 4096, output_dim: int = 256
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, output_dim, None, None),
            ]
        )
class MoCoProjectionHead(ProjectionHead):
    """Projection head used for MoCo.

    "(...) we replace the fc head in MoCo with a 2-layer MLP head (hidden layer
    2048-d, with ReLU)" [1]

    "The projection head is a 3-layer MLP. The prediction head is a 2-layer MLP. The
    hidden layers of both MLPs are 4096-d and are with ReLU; the output layers of both
    MLPs are 256-d, without ReLU. In MoCo v3, all layers in both MLPs have BN" [2]

    [0]: MoCo v1, 2020, https://arxiv.org/abs/1911.05722
    [1]: MoCo v2, 2020, https://arxiv.org/abs/2003.04297
    [2]: MoCo v3, 2021, https://arxiv.org/abs/2104.02057
    """

    def __init__(
        self,
        input_dim: int = 2048,
        hidden_dim: int = 2048,
        output_dim: int = 128,
        num_layers: int = 2,
        batch_norm: bool = False,
    ):
        """Initialize a new MoCoProjectionHead instance.

        Args:
            input_dim: Number of input dimensions.
            hidden_dim: Number of hidden dimensions (2048 for v2, 4096 for v3).
            output_dim: Number of output dimensions (128 for v2, 256 for v3).
            num_layers: Number of hidden layers (2 for v2, 3 for v3).
            batch_norm: Whether or not to use batch norms.
                (False for v2, True for v3)
        """

        def maybe_norm(dim: int) -> Optional[nn.Module]:
            # One fresh batch norm per block, or None when disabled.
            return nn.BatchNorm1d(dim) if batch_norm else None

        # num_layers - 1 hidden blocks with ReLU, then one output block
        # without a non-linearity.
        in_dims = [input_dim] + [hidden_dim] * (num_layers - 2)
        blocks: List[Tuple[int, int, Optional[nn.Module], Optional[nn.Module]]] = [
            (dim, hidden_dim, maybe_norm(hidden_dim), nn.ReLU()) for dim in in_dims
        ]
        blocks.append((hidden_dim, output_dim, maybe_norm(output_dim), None))
        super().__init__(blocks)
class NNCLRProjectionHead(ProjectionHead):
    """Projection head used for NNCLR.

    "The architectureof the projection MLP is 3 fully connected layers of sizes
    [2048,2048,d] where d is the embedding size used to apply the loss. We use
    d = 256 in the experiments unless otherwise stated. All fully-connected
    layers are followed by batch-normalization [36]. All the batch-norm layers
    except the last layer are followed by ReLU activation." [0]

    [0]: NNCLR, 2021, https://arxiv.org/abs/2104.14548

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 2048, hidden_dim: int = 2048, output_dim: int = 256
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, output_dim, nn.BatchNorm1d(output_dim), None),
            ]
        )
class NNCLRPredictionHead(ProjectionHead):
    """Prediction head used for NNCLR.

    "The architecture of the prediction MLP g is 2 fully-connected layers
    of size [4096,d]. The hidden layer of the prediction MLP is followed by
    batch-norm and ReLU. The last layer has no batch-norm or activation." [0]

    [0]: NNCLR, 2021, https://arxiv.org/abs/2104.14548

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 256, hidden_dim: int = 4096, output_dim: int = 256
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, output_dim, None, None),
            ]
        )
class SimCLRProjectionHead(ProjectionHead):
    """Projection head used for SimCLR.

    "We use a MLP with one hidden layer to obtain zi = g(h) = W_2 * σ(W_1 * h)
    where σ is a ReLU non-linearity." [0]

    "We use a 3-layer MLP projection head on top of a ResNet encoder." [1]

    - [0] SimCLR v1, 2020, https://arxiv.org/abs/2002.05709
    - [1] SimCLR v2, 2020, https://arxiv.org/abs/2006.10029
    """

    def __init__(
        self,
        input_dim: int = 2048,
        hidden_dim: int = 2048,
        output_dim: int = 128,
        num_layers: int = 2,
        batch_norm: bool = True,
    ):
        """Initialize a new SimCLRProjectionHead instance.

        Args:
            input_dim: Number of input dimensions.
            hidden_dim: Number of hidden dimensions.
            output_dim: Number of output dimensions.
            num_layers: Number of hidden layers (2 for v1, 3+ for v2).
            batch_norm: Whether or not to use batch norms.
        """

        def maybe_norm(dim: int) -> Optional[nn.Module]:
            # One fresh batch norm per block, or None when disabled.
            return nn.BatchNorm1d(dim) if batch_norm else None

        # num_layers - 1 hidden blocks with ReLU, then one output block
        # without a non-linearity.
        in_dims = [input_dim] + [hidden_dim] * (num_layers - 2)
        blocks: List[Tuple[int, int, Optional[nn.Module], Optional[nn.Module]]] = [
            (dim, hidden_dim, maybe_norm(hidden_dim), nn.ReLU()) for dim in in_dims
        ]
        blocks.append((hidden_dim, output_dim, maybe_norm(output_dim), None))
        super().__init__(blocks)
class SimSiamProjectionHead(ProjectionHead):
    """Projection head used for SimSiam.

    "The projection MLP (in f) has BN applied to each fully-connected (fc)
    layer, including its output fc. Its output fc has no ReLU. The hidden fc is
    2048-d. This MLP has 3 layers." [0]

    [0]: SimSiam, 2020, https://arxiv.org/abs/2011.10566

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 2048, hidden_dim: int = 2048, output_dim: int = 2048
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (
                    hidden_dim,
                    output_dim,
                    # No affine parameters on the output batch norm, following
                    # the SimSiam reference implementation.
                    nn.BatchNorm1d(output_dim, affine=False),
                    None,
                ),
            ]
        )
class SMoGPrototypes(nn.Module):
    """SMoG prototypes module for synchronous momentum grouping.

    Args:
        group_features: Initial group feature vectors of shape n_groups x dim.
        beta: Momentum coefficient for the group feature update.
    """

    def __init__(
        self,
        group_features: torch.Tensor,
        beta: float,
    ):
        super(SMoGPrototypes, self).__init__()
        # Group features are updated manually via momentum, not by gradients.
        self.group_features = nn.Parameter(group_features, requires_grad=False)
        self.beta = beta

    def forward(
        self, x: torch.Tensor, group_features: torch.Tensor, temperature: float = 0.1
    ) -> torch.Tensor:
        """Computes the logits for given model outputs and group features.

        Args:
            x:
                Tensor of shape bsz x dim.
            group_features:
                Momentum updated group features of shape n_groups x dim.
            temperature:
                Temperature parameter for calculating the logits.

        Returns:
            The logits.
        """
        # Cosine similarity: normalize both sides before the dot product.
        x = torch.nn.functional.normalize(x, dim=1)
        group_features = torch.nn.functional.normalize(group_features, dim=1)
        logits = torch.mm(x, group_features.t())
        return logits / temperature

    def get_updated_group_features(self, x: torch.Tensor) -> torch.Tensor:
        """Performs the synchronous momentum update of the group vectors.

        Note: The return annotation was previously `None`, which was wrong;
        this method returns the updated group features.

        Args:
            x:
                Tensor of shape bsz x dim.

        Returns:
            The updated group features.
        """
        assignments = self.assign_groups(x)
        group_features = torch.clone(self.group_features.data)
        for assigned_class in torch.unique(assignments):
            mask = assignments == assigned_class
            # Momentum update: beta * old + (1 - beta) * mean of assigned samples.
            group_features[assigned_class] = self.beta * self.group_features[
                assigned_class
            ] + (1 - self.beta) * x[mask].mean(dim=0)
        return group_features

    def set_group_features(self, x: torch.Tensor) -> None:
        """Sets the group features and asserts they don't require gradient."""
        self.group_features.data = x.to(self.group_features.device)

    @torch.no_grad()
    def assign_groups(self, x: torch.Tensor) -> torch.LongTensor:
        """Assigns each representation in x to a group based on cosine similarity.

        Args:
            x:
                Tensor of shape bsz x dim.

        Returns:
            LongTensor of shape bsz indicating group assignments.
        """
        return torch.argmax(self.forward(x, self.group_features), dim=-1)
class SMoGProjectionHead(ProjectionHead):
    """Projection head used for SMoG.

    "The two kinds of head are both a two-layer MLP and their hidden layer is
    followed by a BatchNorm [28] and an activation function. (...) The output
    layer of projection head also has BN" [0]

    [0]: SMoG, 2022, https://arxiv.org/pdf/2207.06167.pdf

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 2048, hidden_dim: int = 2048, output_dim: int = 128
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (
                    hidden_dim,
                    output_dim,
                    # Output batch norm without affine parameters per the paper.
                    nn.BatchNorm1d(output_dim, affine=False),
                    None,
                ),
            ]
        )
class SMoGPredictionHead(ProjectionHead):
    """Prediction head used for SMoG.

    "The two kinds of head are both a two-layer MLP and their hidden layer is
    followed by a BatchNorm [28] and an activation function. (...) The output
    layer of projection head also has BN" [0]

    [0]: SMoG, 2022, https://arxiv.org/pdf/2207.06167.pdf

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 128, hidden_dim: int = 2048, output_dim: int = 128
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, output_dim, None, None),
            ]
        )
class SimSiamPredictionHead(ProjectionHead):
    """Prediction head used for SimSiam.

    "The prediction MLP (h) has BN applied to its hidden fc layers. Its output
    fc does not have BN (...) or ReLU. This MLP has 2 layers." [0]

    [0]: SimSiam, 2020, https://arxiv.org/abs/2011.10566

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 2048, hidden_dim: int = 512, output_dim: int = 2048
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, output_dim, None, None),
            ]
        )
class SwaVProjectionHead(ProjectionHead):
    """Projection head used for SwaV.

    [0]: SwAV, 2020, https://arxiv.org/abs/2006.09882

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 2048, hidden_dim: int = 2048, output_dim: int = 128
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, output_dim, None, None),
            ]
        )
class SwaVPrototypes(nn.Module):
    """Multihead Prototypes used for SwaV.

    Each output feature is assigned to a prototype, SwaV solves the swapped
    prediction problem where the features of one augmentation are used to
    predict the assigned prototypes of the other augmentation.

    Attributes:
        input_dim:
            The input dimension of the head.
        n_prototypes:
            Number of prototypes. A list of ints creates one prototype head
            per entry (multi-head prototypes).
        n_steps_frozen_prototypes:
            Number of steps during which we keep the prototypes fixed.

    Examples:
        >>> # use features with 128 dimensions and 512 prototypes
        >>> prototypes = SwaVPrototypes(128, 512)
        >>>
        >>> # pass batch through backbone and projection head.
        >>> features = model(x)
        >>> features = nn.functional.normalize(features, dim=1, p=2)
        >>>
        >>> # logits has shape bsz x 512
        >>> logits = prototypes(features)
    """

    def __init__(
        self,
        input_dim: int = 128,
        n_prototypes: Union[List[int], int] = 3000,
        n_steps_frozen_prototypes: int = 0,
    ):
        super(SwaVPrototypes, self).__init__()
        # Remember whether a single int was passed so forward() can return a
        # single tensor instead of a list in that case.
        self._is_single_prototype = isinstance(n_prototypes, int)
        # Normalize to a list of prototype counts, one linear head per entry.
        self.n_prototypes = (
            [n_prototypes] if self._is_single_prototype else n_prototypes
        )
        self.heads = nn.ModuleList(
            [nn.Linear(input_dim, prototypes) for prototypes in self.n_prototypes]
        )
        self.n_steps_frozen_prototypes = n_steps_frozen_prototypes

    def forward(self, x, step=None) -> Union[torch.Tensor, List[torch.Tensor]]:
        """Computes the prototype logits for every head.

        Args:
            x: Batch of (normalized) features.
            step: Current training step. Required when
                n_steps_frozen_prototypes > 0.

        Returns:
            A logits tensor if a single prototype head is used, otherwise a
            list with one logits tensor per head.
        """
        self._freeze_prototypes_if_required(step)
        out = [layer(x) for layer in self.heads]
        return out[0] if self._is_single_prototype else out

    def normalize(self):
        """Normalizes the prototypes so that they are on the unit sphere."""
        for layer in self.heads:
            utils.normalize_weight(layer.weight)

    def _freeze_prototypes_if_required(self, step):
        # Freeze (or unfreeze) all prototype parameters depending on the
        # current training step.
        if self.n_steps_frozen_prototypes > 0:
            if step is None:
                raise ValueError(
                    "`n_steps_frozen_prototypes` is greater than 0, please"
                    " provide the `step` argument to the `forward()` method."
                )
            self.requires_grad_(step >= self.n_steps_frozen_prototypes)
class DINOProjectionHead(ProjectionHead):
    """Projection head used in DINO.

    "The projection head consists of a 3-layer multi-layer perceptron (MLP)
    with hidden dimension 2048 followed by l2 normalization and a weight
    normalized fully connected layer with K dimensions, which is similar to the
    design from SwAV [1]." [0]

    - [0]: DINO, 2021, https://arxiv.org/abs/2104.14294
    - [1]: SwAV, 2020, https://arxiv.org/abs/2006.09882

    Attributes:
        input_dim:
            The input dimension of the head.
        hidden_dim:
            The hidden dimension.
        bottleneck_dim:
            Dimension of the bottleneck in the last layer of the head.
        output_dim:
            The output dimension of the head.
        batch_norm:
            Whether to use batch norm or not. Should be set to False when using
            a vision transformer backbone.
        freeze_last_layer:
            Number of epochs during which we keep the output layer fixed.
            Typically doing so during the first epoch helps training. Try
            increasing this value if the loss does not decrease.
        norm_last_layer:
            Whether or not to weight normalize the last layer of the DINO head.
            Not normalizing leads to better performance but can make the
            training unstable.
    """

    def __init__(
        self,
        input_dim: int = 2048,
        hidden_dim: int = 2048,
        bottleneck_dim: int = 256,
        output_dim: int = 65536,
        batch_norm: bool = False,
        freeze_last_layer: int = -1,
        norm_last_layer: bool = True,
    ):
        # Use a separate BatchNorm1d instance per block. The previous code
        # created one instance and reused it in both hidden blocks, which made
        # the two layers share parameters and running statistics.
        def bn() -> Optional[nn.Module]:
            return nn.BatchNorm1d(hidden_dim) if batch_norm else None

        super().__init__(
            [
                (input_dim, hidden_dim, bn(), nn.GELU()),
                (hidden_dim, hidden_dim, bn(), nn.GELU()),
                (hidden_dim, bottleneck_dim, None, None),
            ]
        )
        self.apply(self._init_weights)
        self.freeze_last_layer = freeze_last_layer
        # Weight-normalized output layer, as in the reference implementation.
        self.last_layer = nn.utils.weight_norm(
            nn.Linear(bottleneck_dim, output_dim, bias=False)
        )
        self.last_layer.weight_g.data.fill_(1)
        # Option to normalize last layer.
        if norm_last_layer:
            self.last_layer.weight_g.requires_grad = False

    def cancel_last_layer_gradients(self, current_epoch: int):
        """Cancel last layer gradients to stabilize the training."""
        if current_epoch >= self.freeze_last_layer:
            return
        for param in self.last_layer.parameters():
            param.grad = None

    def _init_weights(self, module):
        """Initializes layers with a truncated normal distribution."""
        if isinstance(module, nn.Linear):
            utils._no_grad_trunc_normal(
                module.weight,
                mean=0,
                std=0.2,
                a=-2,
                b=2,
            )
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Computes one forward pass through the head."""
        x = self.layers(x)
        # l2 normalization
        x = nn.functional.normalize(x, dim=-1, p=2)
        x = self.last_layer(x)
        return x
class MSNProjectionHead(ProjectionHead):
    """Projection head for MSN [0].

    "We train with a 3-layer projection head with output dimension 256 and
    batch-normalization at the input and hidden layers.." [0]
    Code inspired by [1].

    - [0]: Masked Siamese Networks, 2022, https://arxiv.org/abs/2204.07141
    - [1]: https://github.com/facebookresearch/msn

    Attributes:
        input_dim:
            Input dimension, default value 768 is for a ViT base model.
        hidden_dim:
            Hidden dimension.
        output_dim:
            Output dimension.
    """

    def __init__(
        self,
        input_dim: int = 768,
        hidden_dim: int = 2048,
        output_dim: int = 256,
    ):
        # Two batch-normalized GELU blocks followed by a plain linear output.
        mlp_blocks = [
            (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.GELU()),
            (hidden_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.GELU()),
            (hidden_dim, output_dim, None, None),
        ]
        super().__init__(blocks=mlp_blocks)
class TiCoProjectionHead(ProjectionHead):
    """Projection head used for TiCo.

    "This MLP consists in a linear layer with output size 4096 followed by
    batch normalization, rectified linear units (ReLU), and a final
    linear layer with output dimension 256." [0]

    [0]: TiCo, 2022, https://arxiv.org/pdf/2206.10698.pdf

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 2048, hidden_dim: int = 4096, output_dim: int = 256
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.BatchNorm1d(hidden_dim), nn.ReLU()),
                (hidden_dim, output_dim, None, None),
            ]
        )
class VicRegLLocalProjectionHead(ProjectionHead):
    """Projection head used for the local head of VICRegL.

    The projector network has three linear layers. The first two layers of the
    projector are followed by a layer normalization layer and rectified linear
    units. (The previous docstring said "batch normalization", but this head
    uses ``nn.LayerNorm``.)

    2022, VICRegL, https://arxiv.org/abs/2210.01571

    Args:
        input_dim: Number of input dimensions.
        hidden_dim: Number of hidden dimensions.
        output_dim: Number of output dimensions.
    """

    def __init__(
        self, input_dim: int = 2048, hidden_dim: int = 8192, output_dim: int = 8192
    ):
        # Zero-argument super() is the Python 3 idiom (PEP 3135).
        super().__init__(
            [
                (input_dim, hidden_dim, nn.LayerNorm(hidden_dim), nn.ReLU()),
                (hidden_dim, hidden_dim, nn.LayerNorm(hidden_dim), nn.ReLU()),
                (hidden_dim, output_dim, None, None),
            ]
        )
| 23,561 | 31.861925 | 88 | py |
lightly | lightly-master/lightly/models/modules/ijepa.py | import math
from functools import partial
from typing import Callable, List, Optional
import numpy as np
import torch
import torch.nn as nn
from torchvision.models import vision_transformer
from torchvision.models.vision_transformer import ConvStemConfig
from lightly.models import utils
class IJEPAPredictor(vision_transformer.Encoder):
    """Predictor for the I-JEPA model [0].

    Experimental: Support for I-JEPA is experimental, there might be breaking changes
    in the future.

    Predict patch embeddings. Code inspired by [1].

    - [0]: Joint-Embedding Predictive Architecture, 2023, https://arxiv.org/abs/2301.08243
    - [1]: https://github.com/facebookresearch/ijepa

    Attributes:
        seq_length:
            Token sequence length, including the class token.
        num_layers:
            Number of transformer blocks.
        num_heads:
            Number of attention heads.
        hidden_dim:
            Dimension of the input and output tokens.
        predictor_embed_dim:
            Dimension of inner predicted tokens
        mlp_dim:
            Dimension of the MLP in the transformer block.
        dropout:
            Percentage of elements set to zero after the MLP in the transformer.
        attention_dropout:
            Percentage of elements set to zero after the attention head.
    """

    def __init__(
        self,
        seq_length: int,
        num_layers: int,
        num_heads: int,
        hidden_dim: int,
        predictor_embed_dim: int,
        num_patches: int,
        mlp_dim: int,
        dropout: float,
        attention_dropout: float,
        norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
        **kwargs
    ):
        super().__init__(
            seq_length=seq_length,
            num_layers=num_layers,
            num_heads=num_heads,
            hidden_dim=hidden_dim,
            mlp_dim=mlp_dim,
            dropout=dropout,
            attention_dropout=attention_dropout,
            norm_layer=norm_layer,
        )
        # Projects encoder tokens into the predictor embedding space.
        self.predictor_embed = nn.Linear(mlp_dim, predictor_embed_dim, bias=True)
        # Learned token that stands in for each masked target patch.
        self.mask_token = nn.Parameter(torch.zeros(1, 1, predictor_embed_dim))
        # Projects predictor outputs back to the encoder embedding space.
        self.predictor_proj = nn.Linear(predictor_embed_dim, mlp_dim, bias=True)
        # Fixed (non-trainable) 2D sin-cos positional embeddings; assumes a
        # square grid of num_patches patches (int(num_patches ** 0.5) per side).
        self.predictor_pos_embed = nn.Parameter(
            torch.zeros(1, num_patches, predictor_embed_dim), requires_grad=False
        )
        predictor_pos_embed = utils.get_2d_sincos_pos_embed(
            self.predictor_pos_embed.shape[-1], int(num_patches**0.5), cls_token=False
        )
        self.predictor_pos_embed.data.copy_(
            torch.from_numpy(predictor_pos_embed).float().unsqueeze(0)
        )

    @classmethod
    def from_vit_encoder(cls, vit_encoder, num_patches):
        """Creates a I-JEPA predictor backbone (mhas and layernorm) from a torchvision ViT encoder."""
        # Create a new instance with dummy values as they will be overwritten
        # by the copied vit_encoder attributes
        encoder = cls(
            seq_length=1,
            num_layers=1,
            num_heads=1,
            hidden_dim=1,
            predictor_embed_dim=768,
            mlp_dim=768,
            num_patches=num_patches,
            dropout=0,
            attention_dropout=0,
        )
        # Only the transformer blocks and final layer norm are copied over;
        # the projection layers and mask token keep their fresh initialization.
        encoder.layers = vit_encoder.layers
        encoder.ln = vit_encoder.ln
        return encoder

    def forward(self, x, masks_x, masks):
        """Predicts embeddings for the masked target patches.

        Args:
            x:
                Batch of encoded context tokens.
            masks_x:
                Mask index tensor(s) selecting the context patches that
                produced ``x``.
            masks:
                Mask index tensor(s) selecting the target patches to predict.

        Returns:
            Predicted embeddings for the target patches, projected back to the
            encoder embedding space.
        """
        assert (masks is not None) and (
            masks_x is not None
        ), "Cannot run predictor without mask indices"
        # Normalize both mask arguments to lists of mask tensors.
        if not isinstance(masks_x, list):
            masks_x = [masks_x]
        if not isinstance(masks, list):
            masks = [masks]
        # x contains one token sequence per (image, context mask) pair.
        B = len(x) // len(masks_x)
        # Map encoder tokens into the predictor embedding space.
        x = self.predictor_embed(x)
        # Add the positional embeddings of the context patches.
        x_pos_embed = self.predictor_pos_embed.repeat(B, 1, 1)
        x += utils.apply_masks(x_pos_embed, masks_x)
        _, N_ctxt, _ = x.shape
        # Build one mask token per target patch: the learned mask token plus
        # the positional embedding of the target location.
        pos_embs = self.predictor_pos_embed.repeat(B, 1, 1)
        pos_embs = utils.apply_masks(pos_embs, masks)
        pos_embs = utils.repeat_interleave_batch(pos_embs, B, repeat=len(masks_x))
        pred_tokens = self.mask_token.repeat(pos_embs.size(0), pos_embs.size(1), 1)
        pred_tokens += pos_embs
        # Concatenate context tokens and mask tokens and run the transformer.
        x = x.repeat(len(masks), 1, 1)
        x = torch.cat([x, pred_tokens], dim=1)
        x = self.ln(self.layers(x))
        # Keep only the outputs at the mask-token positions and project them
        # back to the encoder embedding space.
        x = x[:, N_ctxt:]
        x = self.predictor_proj(x)
        return x
class IJEPAEncoder(vision_transformer.Encoder):
    """Encoder for the I-JEPA model [0].

    Experimental: Support for I-JEPA is experimental, there might be breaking changes
    in the future.

    Encodes patch embeddings. Code inspired by [1].

    - [0]: Joint-Embedding Predictive Architecture, 2023, https://arxiv.org/abs/2301.08243
    - [1]: https://github.com/facebookresearch/ijepa

    Attributes:
        seq_length:
            Token sequence length, including the class token.
        num_layers:
            Number of transformer blocks.
        num_heads:
            Number of attention heads.
        hidden_dim:
            Dimension of the input and output tokens.
        mlp_dim:
            Dimension of the MLP in the transformer block.
        dropout:
            Percentage of elements set to zero after the MLP in the transformer.
        attention_dropout:
            Percentage of elements set to zero after the attention head.
    """

    def __init__(
        self,
        seq_length: int,
        num_layers: int,
        num_heads: int,
        hidden_dim: int,
        mlp_dim: int,
        dropout: float,
        attention_dropout: float,
        norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
    ):
        super().__init__(
            seq_length=seq_length,
            num_layers=num_layers,
            num_heads=num_heads,
            hidden_dim=hidden_dim,
            mlp_dim=mlp_dim,
            dropout=dropout,
            attention_dropout=attention_dropout,
            norm_layer=norm_layer,
        )

    @classmethod
    def from_vit_encoder(cls, vit_encoder: vision_transformer.Encoder):
        """Creates a IJEPA encoder from a torchvision ViT encoder."""
        # Create a new instance with dummy values as they will be overwritten
        # by the copied vit_encoder attributes
        encoder = cls(
            seq_length=1,
            num_layers=1,
            num_heads=1,
            hidden_dim=1,
            mlp_dim=1,
            dropout=0,
            attention_dropout=0,
        )
        # Reuse the pretrained positional embedding, dropout, transformer
        # blocks and final layer norm from the torchvision encoder.
        encoder.pos_embedding = vit_encoder.pos_embedding
        encoder.dropout = vit_encoder.dropout
        encoder.layers = vit_encoder.layers
        encoder.ln = vit_encoder.ln
        return encoder

    def forward(
        self, input: torch.Tensor, idx_keep: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Encode input tokens.

        Args:
            input:
                Batch of token sequences.
            idx_keep:
                Tensor with shape (batch_size, num_tokens_to_keep) where each
                entry is an index of the token to keep in the respective batch.
                If specified, only the indexed tokens will be encoded.

        Returns:
            Batch of encoded output tokens.
        """
        # Add (possibly resolution-interpolated) positional embeddings before
        # optionally dropping masked tokens.
        input = input + self.interpolate_pos_encoding(input)
        if idx_keep is not None:
            input = utils.apply_masks(input, idx_keep)
        return self.ln(self.layers(self.dropout(input)))

    def interpolate_pos_encoding(self, input: torch.Tensor):
        """Returns the interpolated positional embedding for the given input.

        This function interpolates self.pos_embedding for all tokens in the input,
        ignoring the class token. This allows encoding variable sized images.

        Args:
            input:
                Input tensor with shape (batch_size, num_sequences).
        """
        # code copied from:
        # https://github.com/facebookresearch/msn/blob/4388dc1eadbe3042b85d3296d41b9b207656e043/src/deit.py#L291
        # Number of patch tokens in the input / stored embedding, excluding
        # the class token at position 0.
        npatch = input.shape[1] - 1
        N = self.pos_embedding.shape[1] - 1
        if npatch == N:
            # Input matches the training resolution; no interpolation needed.
            return self.pos_embedding
        class_emb = self.pos_embedding[:, 0]
        pos_embedding = self.pos_embedding[:, 1:]
        dim = input.shape[-1]
        # Reshape the flat patch embeddings to a square 2D grid and resize it
        # with bicubic interpolation (assumes sqrt(N) is an integer).
        pos_embedding = nn.functional.interpolate(
            pos_embedding.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(
                0, 3, 1, 2
            ),
            scale_factor=math.sqrt(npatch / N),
            mode="bicubic",
        )
        # Flatten back to a token sequence and re-attach the class token
        # embedding, which is never interpolated.
        pos_embedding = pos_embedding.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_emb.unsqueeze(0), pos_embedding), dim=1)
class IJEPABackbone(vision_transformer.VisionTransformer):
"""Encoder for the I-JEPA model [0].
Experimental: Support for I-JEPA is experimental, there might be breaking changes
in the future.
Converts images into patches and encodes them. Code inspired by [1].
Note that this implementation uses a learned positional embedding while [0]
uses a fixed positional embedding.
- [0]: Joint-Embedding Predictive Architecture, 2023, https://arxiv.org/abs/2301.08243
- [1]: https://github.com/facebookresearch/ijepa
Attributes:
image_size:
Input image size.
patch_size:
Width and height of the image patches. image_size must be a multiple
of patch_size.
num_layers:
Number of transformer blocks.
num_heads:
Number of attention heads.
hidden_dim:
Dimension of the input and output tokens.
mlp_dim:
Dimension of the MLP in the transformer block.
dropout:
Percentage of elements set to zero after the MLP in the transformer.
attention_dropout:
Percentage of elements set to zero after the attention head.
num_classes:
Number of classes for the classification head. Currently not used.
representation_size:
If specified, an additional linear layer is added before the
classification head to change the token dimension from hidden_dim
to representation_size. Currently not used.
norm_layer:
Callable that creates a normalization layer.
"""
def __init__(
self,
image_size: int,
patch_size: int,
num_layers: int,
num_heads: int,
hidden_dim: int,
mlp_dim: int,
dropout: float = 0,
attention_dropout: float = 0,
num_classes: int = 1000,
representation_size: Optional[int] = None,
norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
conv_stem_configs: Optional[List[ConvStemConfig]] = None,
):
super().__init__(
image_size=image_size,
patch_size=patch_size,
num_layers=num_layers,
num_heads=num_heads,
hidden_dim=hidden_dim,
mlp_dim=mlp_dim,
dropout=dropout,
attention_dropout=attention_dropout,
num_classes=num_classes,
representation_size=representation_size,
norm_layer=norm_layer,
conv_stem_configs=conv_stem_configs,
)
self.encoder = IJEPAEncoder(
seq_length=self.seq_length,
num_layers=num_layers,
num_heads=num_heads,
hidden_dim=hidden_dim,
mlp_dim=mlp_dim,
dropout=dropout,
attention_dropout=attention_dropout,
norm_layer=norm_layer,
)
@classmethod
def from_vit(cls, vit: vision_transformer.VisionTransformer):
"""Creates a IJEPABackbone from a torchvision ViT model."""
# Create a new instance with dummy values as they will be overwritten
# by the copied vit_encoder attributes
backbone = cls(
image_size=vit.image_size,
patch_size=vit.patch_size,
num_layers=1,
num_heads=1,
hidden_dim=vit.hidden_dim,
mlp_dim=vit.mlp_dim,
dropout=vit.dropout,
attention_dropout=vit.attention_dropout,
num_classes=vit.num_classes,
representation_size=vit.representation_size,
norm_layer=vit.norm_layer,
)
backbone.conv_proj = vit.conv_proj
backbone.class_token = vit.class_token
backbone.seq_length = vit.seq_length
backbone.heads = vit.heads
backbone.encoder = IJEPAEncoder.from_vit_encoder(vit.encoder)
return backbone
def forward(
self, images: torch.Tensor, idx_keep: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""
Returns encoded class tokens from a batch of images.
Args:
images:
Tensor with shape (batch_size, channels, image_size, image_size).
idx_keep:
Tensor with shape (batch_size, num_tokens_to_keep) where each
entry is an index of the token to keep in the respective batch.
If specified, only the indexed tokens will be passed to the
encoder.
Returns:
Tensor with shape (batch_size, hidden_dim) containing the
encoded class token for every image.
"""
if idx_keep is not None:
if not isinstance(idx_keep, list):
idx_keep = [idx_keep]
out = self.encode(images, idx_keep)
return out
def encode(
self, images: torch.Tensor, idx_keep: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Returns encoded class and patch tokens from images.
Args:
images:
Tensor with shape (batch_size, channels, image_size, image_size).
idx_keep:
Tensor with shape (batch_size, num_tokens_to_keep) where each
entry is an index of the token to keep in the respective batch.
If specified, only the indexed tokens will be passed to the
encoder.
Returns:
Tensor with shape (batch_size, sequence_length, hidden_dim)
containing the encoded class and patch tokens for every image.
"""
out = self.images_to_tokens(images, prepend_class_token=True)
return self.encoder(out, idx_keep)
def images_to_tokens(
self, images: torch.Tensor, prepend_class_token: bool
) -> torch.Tensor:
"""Converts images into patch tokens.
Args:
images:
Tensor with shape (batch_size, channels, image_size, image_size).
Returns:
Tensor with shape (batch_size, sequence_length - 1, hidden_dim)
containing the patch tokens.
"""
x = self.conv_proj(images)
tokens = x.flatten(2).transpose(1, 2)
if prepend_class_token:
tokens = utils.prepend_class_token(tokens, self.class_token)
return tokens
| 15,110 | 33.817972 | 112 | py |
lightly | lightly-master/lightly/models/modules/masked_autoencoder.py | from __future__ import annotations
import math
from functools import partial
from typing import Callable, List, Optional
import torch
import torch.nn as nn
# vision_transformer requires torchvision >= 0.12
from torchvision.models import vision_transformer
from torchvision.models.vision_transformer import ConvStemConfig
from lightly.models import utils
class MAEEncoder(vision_transformer.Encoder):
    """Encoder for the Masked Autoencoder model [0].

    Encodes patch embeddings. Code inspired by [1].

    - [0]: Masked Autoencoder, 2021, https://arxiv.org/abs/2111.06377
    - [1]: https://github.com/facebookresearch/mae

    Attributes:
        seq_length:
            Token sequence length, including the class token.
        num_layers:
            Number of transformer blocks.
        num_heads:
            Number of attention heads.
        hidden_dim:
            Dimension of the input and output tokens.
        mlp_dim:
            Dimension of the MLP in the transformer block.
        dropout:
            Percentage of elements set to zero after the MLP in the transformer.
        attention_dropout:
            Percentage of elements set to zero after the attention head.
    """

    def __init__(
        self,
        seq_length: int,
        num_layers: int,
        num_heads: int,
        hidden_dim: int,
        mlp_dim: int,
        dropout: float,
        attention_dropout: float,
        norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
    ):
        super().__init__(
            seq_length=seq_length,
            num_layers=num_layers,
            num_heads=num_heads,
            hidden_dim=hidden_dim,
            mlp_dim=mlp_dim,
            dropout=dropout,
            attention_dropout=attention_dropout,
            norm_layer=norm_layer,
        )

    @classmethod
    def from_vit_encoder(cls, vit_encoder: vision_transformer.Encoder) -> MAEEncoder:
        """Creates a MAEEncoder from a torchvision ViT encoder."""
        # Build a throwaway instance with dummy hyperparameters; every
        # relevant submodule is overwritten with the given encoder's modules.
        encoder = cls(
            seq_length=1,
            num_layers=1,
            num_heads=1,
            hidden_dim=1,
            mlp_dim=1,
            dropout=0,
            attention_dropout=0,
        )
        encoder.pos_embedding = vit_encoder.pos_embedding
        encoder.dropout = vit_encoder.dropout
        encoder.layers = vit_encoder.layers
        encoder.ln = vit_encoder.ln
        return encoder

    def forward(
        self, input: torch.Tensor, idx_keep: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Encode input tokens.

        Args:
            input:
                Batch of token sequences.
            idx_keep:
                Tensor with shape (batch_size, num_tokens_to_keep) where each
                entry is an index of the token to keep in the respective batch.
                If specified, only the indexed tokens will be encoded.

        Returns:
            Batch of encoded output tokens.
        """
        tokens = input + self.interpolate_pos_encoding(input)
        if idx_keep is not None:
            tokens = utils.get_at_index(tokens, idx_keep)
        tokens = self.dropout(tokens)
        tokens = self.layers(tokens)
        return self.ln(tokens)

    def interpolate_pos_encoding(self, input: torch.Tensor):
        """Returns the interpolated positional embedding for the given input.

        This function interpolates self.pos_embedding for all tokens in the
        input, ignoring the class token. This allows encoding variable sized
        images.

        Args:
            input:
                Input tensor with shape (batch_size, num_sequences).
        """
        # code adapted from:
        # https://github.com/facebookresearch/msn/blob/4388dc1eadbe3042b85d3296d41b9b207656e043/src/deit.py#L291
        num_input_patches = input.shape[1] - 1
        num_embed_patches = self.pos_embedding.shape[1] - 1
        if num_input_patches == num_embed_patches:
            # Sequence length matches the stored embedding; nothing to do.
            return self.pos_embedding
        class_embedding = self.pos_embedding[:, 0]
        patch_embedding = self.pos_embedding[:, 1:]
        embed_dim = input.shape[-1]
        # Reshape the flat patch embedding into its 2D grid (assumes a square
        # grid) and interpolate it to the new number of patches.
        grid_size = int(math.sqrt(num_embed_patches))
        patch_embedding = patch_embedding.reshape(
            1, grid_size, grid_size, embed_dim
        ).permute(0, 3, 1, 2)
        patch_embedding = nn.functional.interpolate(
            patch_embedding,
            scale_factor=math.sqrt(num_input_patches / num_embed_patches),
            mode="bicubic",
        )
        patch_embedding = patch_embedding.permute(0, 2, 3, 1).view(1, -1, embed_dim)
        return torch.cat((class_embedding.unsqueeze(0), patch_embedding), dim=1)
class MAEBackbone(vision_transformer.VisionTransformer):
    """Backbone for the Masked Autoencoder model [0].

    Converts images into patches and encodes them. Code inspired by [1].
    Note that this implementation uses a learned positional embedding while [0]
    uses a fixed positional embedding.

    - [0]: Masked Autoencoder, 2021, https://arxiv.org/abs/2111.06377
    - [1]: https://github.com/facebookresearch/mae
    - [2]: Early Convolutions Help Transformers See Better, 2021, https://arxiv.org/abs/2106.14881.

    Attributes:
        image_size:
            Input image size.
        patch_size:
            Width and height of the image patches. image_size must be a
            multiple of patch_size.
        num_layers:
            Number of transformer blocks.
        num_heads:
            Number of attention heads.
        hidden_dim:
            Dimension of the input and output tokens.
        mlp_dim:
            Dimension of the MLP in the transformer block.
        dropout:
            Percentage of elements set to zero after the MLP in the transformer.
        attention_dropout:
            Percentage of elements set to zero after the attention head.
        num_classes:
            Number of classes for the classification head. Currently not used.
        representation_size:
            If specified, an additional linear layer is added before the
            classification head to change the token dimension from hidden_dim
            to representation_size. Currently not used.
        norm_layer:
            Callable that creates a normalization layer.
        conv_stem_configs:
            If specified, a convolutional stem is added at the beginning of
            the network following [2]. Not used in the original Masked
            Autoencoder paper [0].
    """

    def __init__(
        self,
        image_size: int,
        patch_size: int,
        num_layers: int,
        num_heads: int,
        hidden_dim: int,
        mlp_dim: int,
        dropout: float = 0,
        attention_dropout: float = 0,
        num_classes: int = 1000,
        representation_size: Optional[int] = None,
        norm_layer: Callable[..., torch.nn.Module] = partial(nn.LayerNorm, eps=1e-6),
        conv_stem_configs: Optional[List[ConvStemConfig]] = None,
    ):
        # Arguments shared between the ViT parent and the replacement encoder.
        transformer_kwargs = dict(
            num_layers=num_layers,
            num_heads=num_heads,
            hidden_dim=hidden_dim,
            mlp_dim=mlp_dim,
            dropout=dropout,
            attention_dropout=attention_dropout,
            norm_layer=norm_layer,
        )
        super().__init__(
            image_size=image_size,
            patch_size=patch_size,
            num_classes=num_classes,
            representation_size=representation_size,
            conv_stem_configs=conv_stem_configs,
            **transformer_kwargs,
        )
        # Swap the default torchvision encoder for one that supports masking;
        # self.seq_length is computed by the parent constructor.
        self.encoder = MAEEncoder(
            seq_length=self.seq_length,
            **transformer_kwargs,
        )

    @classmethod
    def from_vit(cls, vit: vision_transformer.VisionTransformer) -> MAEBackbone:
        """Creates a MAEBackbone from a torchvision ViT model."""
        # num_layers/num_heads are dummies: the encoder constructed here is
        # thrown away and replaced by a copy of the torchvision encoder below.
        backbone = cls(
            image_size=vit.image_size,
            patch_size=vit.patch_size,
            num_layers=1,
            num_heads=1,
            hidden_dim=vit.hidden_dim,
            mlp_dim=vit.mlp_dim,
            dropout=vit.dropout,
            attention_dropout=vit.attention_dropout,
            num_classes=vit.num_classes,
            representation_size=vit.representation_size,
            norm_layer=vit.norm_layer,
        )
        # Take over the (possibly pretrained) modules and bookkeeping values.
        for attribute in ("conv_proj", "class_token", "seq_length", "heads"):
            setattr(backbone, attribute, getattr(vit, attribute))
        backbone.encoder = MAEEncoder.from_vit_encoder(vit.encoder)
        return backbone

    def forward(
        self, images: torch.Tensor, idx_keep: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Returns encoded class tokens from a batch of images.

        Args:
            images:
                Tensor with shape (batch_size, channels, image_size, image_size).
            idx_keep:
                Tensor with shape (batch_size, num_tokens_to_keep) where each
                entry is an index of the token to keep in the respective batch.
                If specified, only the indexed tokens will be passed to the
                encoder.

        Returns:
            Tensor with shape (batch_size, hidden_dim) containing the
            encoded class token for every image.
        """
        encoded = self.encode(images, idx_keep)
        # The class token is always at sequence position 0.
        return encoded[:, 0]

    def encode(
        self, images: torch.Tensor, idx_keep: Optional[torch.Tensor] = None
    ) -> torch.Tensor:
        """Returns encoded class and patch tokens from images.

        Args:
            images:
                Tensor with shape (batch_size, channels, image_size, image_size).
            idx_keep:
                Tensor with shape (batch_size, num_tokens_to_keep) where each
                entry is an index of the token to keep in the respective batch.
                If specified, only the indexed tokens will be passed to the
                encoder.

        Returns:
            Tensor with shape (batch_size, sequence_length, hidden_dim)
            containing the encoded class and patch tokens for every image.
        """
        tokens = self.images_to_tokens(images, prepend_class_token=True)
        return self.encoder(tokens, idx_keep)

    def images_to_tokens(
        self, images: torch.Tensor, prepend_class_token: bool
    ) -> torch.Tensor:
        """Converts images into patch tokens.

        Args:
            images:
                Tensor with shape (batch_size, channels, image_size, image_size).
            prepend_class_token:
                If True, the learned class token is prepended to the sequence.

        Returns:
            Tensor with shape (batch_size, sequence_length - 1, hidden_dim)
            containing the patch tokens (one more token if the class token is
            prepended).
        """
        patches = self.conv_proj(images)
        # (B, C, H', W') -> (B, H'*W', C): flatten the grid into a sequence.
        tokens = patches.flatten(2).transpose(1, 2)
        if not prepend_class_token:
            return tokens
        return utils.prepend_class_token(tokens, self.class_token)
class MAEDecoder(vision_transformer.Encoder):
    """Decoder for the Masked Autoencoder model [0].

    Decodes encoded patches and predicts pixel values for every patch.
    Code inspired by [1].

    - [0]: Masked Autoencoder, 2021, https://arxiv.org/abs/2111.06377
    - [1]: https://github.com/facebookresearch/mae

    Attributes:
        seq_length:
            Token sequence length, including the class token.
        num_layers:
            Number of transformer blocks.
        num_heads:
            Number of attention heads.
        embed_input_dim:
            Dimension of the input tokens. Usually be equal to the hidden
            dimension of the MAEEncoder or MAEBackbone.
        hidden_dim:
            Dimension of the decoder tokens.
        mlp_dim:
            Dimension of the MLP in the transformer block.
        out_dim:
            Output dimension of the prediction for a single patch. Usually
            equal to (3 * patch_size ** 2).
        dropout:
            Percentage of elements set to zero after the MLP in the transformer.
        attention_dropout:
            Percentage of elements set to zero after the attention head.
    """

    def __init__(
        self,
        seq_length: int,
        num_layers: int,
        num_heads: int,
        embed_input_dim: int,
        hidden_dim: int,
        mlp_dim: int,
        out_dim: int,
        dropout: float = 0.0,
        attention_dropout: float = 0.0,
        norm_layer: Callable[..., nn.Module] = partial(nn.LayerNorm, eps=1e-6),
    ):
        super().__init__(
            seq_length=seq_length,
            num_layers=num_layers,
            num_heads=num_heads,
            hidden_dim=hidden_dim,
            mlp_dim=mlp_dim,
            dropout=dropout,
            attention_dropout=attention_dropout,
            norm_layer=norm_layer,
        )
        # Projects encoder tokens into the (usually smaller) decoder space.
        self.decoder_embed = nn.Linear(embed_input_dim, hidden_dim, bias=True)
        # Maps each decoded token to the flattened pixel values of its patch.
        self.prediction_head = nn.Linear(hidden_dim, out_dim)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Returns predicted pixel values from encoded tokens.

        Args:
            input:
                Tensor with shape (batch_size, seq_length, embed_input_dim).

        Returns:
            Tensor with shape (batch_size, seq_length, out_dim).
        """
        return self.predict(self.decode(self.embed(input)))

    def embed(self, input: torch.Tensor) -> torch.Tensor:
        """Embeds encoded input tokens into decoder token dimension.

        This is a single linear layer that changes the token dimension from
        embed_input_dim to hidden_dim.

        Args:
            input:
                Tensor with shape (batch_size, seq_length, embed_input_dim)
                containing the encoded tokens.

        Returns:
            Tensor with shape (batch_size, seq_length, hidden_dim) containing
            the embedded tokens.
        """
        return self.decoder_embed(input)

    def decode(self, input: torch.Tensor) -> torch.Tensor:
        """Forward pass through the decoder transformer.

        Args:
            input:
                Tensor with shape (batch_size, seq_length, hidden_dim)
                containing the embedded tokens.

        Returns:
            Tensor with shape (batch_size, seq_length, hidden_dim) containing
            the decoded tokens.
        """
        # Delegate to the plain torchvision Encoder forward.
        return super().forward(input)

    def predict(self, input: torch.Tensor) -> torch.Tensor:
        """Predicts pixel values from decoded tokens.

        Args:
            input:
                Tensor with shape (batch_size, seq_length, hidden_dim)
                containing the decoded tokens.

        Returns:
            Tensor with shape (batch_size, seq_length, out_dim) containing
            predictions for each token.
        """
        return self.prediction_head(input)
| 14,929 | 33.560185 | 112 | py |
lightly | lightly-master/lightly/models/modules/nn_memory_bank.py | """ Nearest Neighbour Memory Bank Module """
# Copyright (c) 2021. Lightly AG and its affiliates.
# All Rights Reserved
import torch
from lightly.loss.memory_bank import MemoryBankModule
class NNMemoryBankModule(MemoryBankModule):
    """Nearest Neighbour Memory Bank implementation.

    This class implements a nearest neighbour memory bank as described in the
    NNCLR paper[0]. During the forward pass we return the nearest neighbour
    from the memory bank.

    [0] NNCLR, 2021, https://arxiv.org/abs/2104.14548

    Attributes:
        size:
            Number of keys the memory bank can store. If set to 0,
            memory bank is not used.

    Examples:
        >>> model = NNCLR(backbone)
        >>> criterion = NTXentLoss(temperature=0.1)
        >>>
        >>> nn_replacer = NNMemoryBankModule(size=2 ** 16)
        >>>
        >>> # forward pass
        >>> (z0, p0), (z1, p1) = model(x0, x1)
        >>> z0 = nn_replacer(z0.detach(), update=False)
        >>> z1 = nn_replacer(z1.detach(), update=True)
        >>>
        >>> loss = 0.5 * (criterion(z0, p1) + criterion(z1, p0))
    """

    def __init__(self, size: int = 2**16):
        super().__init__(size)

    def forward(self, output: torch.Tensor, update: bool = False):
        """Returns nearest neighbour of output tensor from memory bank.

        Args:
            output: The torch tensor for which you want the nearest neighbour
            update: If `True` updated the memory bank by adding output to it
        """
        # The parent returns the (unchanged) output and the current bank with
        # shape (dim, size); transpose it to (size, dim) rows.
        output, bank = super().forward(output, update=update)
        bank = bank.to(output.device).t()
        # Cosine similarity: normalize both sides, then take the dot products.
        queries = torch.nn.functional.normalize(output, dim=1)
        keys = torch.nn.functional.normalize(bank, dim=1)
        similarities = queries @ keys.t()
        # Pick the most similar (un-normalized) bank entry for every query.
        nearest_indices = similarities.argmax(dim=1)
        return bank[nearest_indices]
| 2,108 | 31.446154 | 85 | py |
lightly | lightly-master/lightly/openapi_generated/__init__.py | 0 | 0 | 0 | py | |
lightly | lightly-master/lightly/openapi_generated/swagger_client/__init__.py | # coding: utf-8
# flake8: noqa
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
__version__ = "1.0.0"
# import apis into sdk package
from lightly.openapi_generated.swagger_client.api.collaboration_api import CollaborationApi
from lightly.openapi_generated.swagger_client.api.datasets_api import DatasetsApi
from lightly.openapi_generated.swagger_client.api.datasources_api import DatasourcesApi
from lightly.openapi_generated.swagger_client.api.docker_api import DockerApi
from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi
from lightly.openapi_generated.swagger_client.api.embeddings2d_api import Embeddings2dApi
from lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi
from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi
from lightly.openapi_generated.swagger_client.api.meta_data_configurations_api import MetaDataConfigurationsApi
from lightly.openapi_generated.swagger_client.api.predictions_api import PredictionsApi
from lightly.openapi_generated.swagger_client.api.quota_api import QuotaApi
from lightly.openapi_generated.swagger_client.api.samples_api import SamplesApi
from lightly.openapi_generated.swagger_client.api.samplings_api import SamplingsApi
from lightly.openapi_generated.swagger_client.api.scores_api import ScoresApi
from lightly.openapi_generated.swagger_client.api.tags_api import TagsApi
from lightly.openapi_generated.swagger_client.api.teams_api import TeamsApi
from lightly.openapi_generated.swagger_client.api.versioning_api import VersioningApi
# import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.configuration import Configuration
from lightly.openapi_generated.swagger_client.exceptions import OpenApiException
from lightly.openapi_generated.swagger_client.exceptions import ApiTypeError
from lightly.openapi_generated.swagger_client.exceptions import ApiValueError
from lightly.openapi_generated.swagger_client.exceptions import ApiKeyError
from lightly.openapi_generated.swagger_client.exceptions import ApiAttributeError
from lightly.openapi_generated.swagger_client.exceptions import ApiException
# import models into sdk package
from lightly.openapi_generated.swagger_client.models.active_learning_score_create_request import ActiveLearningScoreCreateRequest
from lightly.openapi_generated.swagger_client.models.active_learning_score_data import ActiveLearningScoreData
from lightly.openapi_generated.swagger_client.models.api_error_code import ApiErrorCode
from lightly.openapi_generated.swagger_client.models.api_error_response import ApiErrorResponse
from lightly.openapi_generated.swagger_client.models.async_task_data import AsyncTaskData
from lightly.openapi_generated.swagger_client.models.configuration_data import ConfigurationData
from lightly.openapi_generated.swagger_client.models.configuration_entry import ConfigurationEntry
from lightly.openapi_generated.swagger_client.models.configuration_set_request import ConfigurationSetRequest
from lightly.openapi_generated.swagger_client.models.configuration_value_data_type import ConfigurationValueDataType
from lightly.openapi_generated.swagger_client.models.create_cf_bucket_activity_request import CreateCFBucketActivityRequest
from lightly.openapi_generated.swagger_client.models.create_docker_worker_registry_entry_request import CreateDockerWorkerRegistryEntryRequest
from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse
from lightly.openapi_generated.swagger_client.models.create_sample_with_write_urls_response import CreateSampleWithWriteUrlsResponse
from lightly.openapi_generated.swagger_client.models.create_team_membership_request import CreateTeamMembershipRequest
from lightly.openapi_generated.swagger_client.models.creator import Creator
from lightly.openapi_generated.swagger_client.models.crop_data import CropData
from lightly.openapi_generated.swagger_client.models.dataset_create_request import DatasetCreateRequest
from lightly.openapi_generated.swagger_client.models.dataset_creator import DatasetCreator
from lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData
from lightly.openapi_generated.swagger_client.models.dataset_data_enriched import DatasetDataEnriched
from lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.dataset_update_request import DatasetUpdateRequest
from lightly.openapi_generated.swagger_client.models.datasource_config import DatasourceConfig
from lightly.openapi_generated.swagger_client.models.datasource_config_azure import DatasourceConfigAzure
from lightly.openapi_generated.swagger_client.models.datasource_config_azure_all_of import DatasourceConfigAzureAllOf
from lightly.openapi_generated.swagger_client.models.datasource_config_base import DatasourceConfigBase
from lightly.openapi_generated.swagger_client.models.datasource_config_gcs import DatasourceConfigGCS
from lightly.openapi_generated.swagger_client.models.datasource_config_gcs_all_of import DatasourceConfigGCSAllOf
from lightly.openapi_generated.swagger_client.models.datasource_config_lightly import DatasourceConfigLIGHTLY
from lightly.openapi_generated.swagger_client.models.datasource_config_local import DatasourceConfigLOCAL
from lightly.openapi_generated.swagger_client.models.datasource_config_obs import DatasourceConfigOBS
from lightly.openapi_generated.swagger_client.models.datasource_config_obs_all_of import DatasourceConfigOBSAllOf
from lightly.openapi_generated.swagger_client.models.datasource_config_s3 import DatasourceConfigS3
from lightly.openapi_generated.swagger_client.models.datasource_config_s3_all_of import DatasourceConfigS3AllOf
from lightly.openapi_generated.swagger_client.models.datasource_config_s3_delegated_access import DatasourceConfigS3DelegatedAccess
from lightly.openapi_generated.swagger_client.models.datasource_config_s3_delegated_access_all_of import DatasourceConfigS3DelegatedAccessAllOf
from lightly.openapi_generated.swagger_client.models.datasource_config_verify_data import DatasourceConfigVerifyData
from lightly.openapi_generated.swagger_client.models.datasource_config_verify_data_errors import DatasourceConfigVerifyDataErrors
from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_request import DatasourceProcessedUntilTimestampRequest
from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_response import DatasourceProcessedUntilTimestampResponse
from lightly.openapi_generated.swagger_client.models.datasource_purpose import DatasourcePurpose
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data import DatasourceRawSamplesData
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data_row import DatasourceRawSamplesDataRow
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_metadata_data import DatasourceRawSamplesMetadataData
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_metadata_data_row import DatasourceRawSamplesMetadataDataRow
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data import DatasourceRawSamplesPredictionsData
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data_row import DatasourceRawSamplesPredictionsDataRow
from lightly.openapi_generated.swagger_client.models.dimensionality_reduction_method import DimensionalityReductionMethod
from lightly.openapi_generated.swagger_client.models.docker_license_information import DockerLicenseInformation
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_create_request import DockerRunArtifactCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_created_data import DockerRunArtifactCreatedData
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_data import DockerRunArtifactData
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_storage_location import DockerRunArtifactStorageLocation
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_type import DockerRunArtifactType
from lightly.openapi_generated.swagger_client.models.docker_run_create_request import DockerRunCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_data import DockerRunData
from lightly.openapi_generated.swagger_client.models.docker_run_log_data import DockerRunLogData
from lightly.openapi_generated.swagger_client.models.docker_run_log_entry_data import DockerRunLogEntryData
from lightly.openapi_generated.swagger_client.models.docker_run_log_level import DockerRunLogLevel
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_create_request import DockerRunScheduledCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_data import DockerRunScheduledData
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_priority import DockerRunScheduledPriority
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_state import DockerRunScheduledState
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_update_request import DockerRunScheduledUpdateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_state import DockerRunState
from lightly.openapi_generated.swagger_client.models.docker_run_update_request import DockerRunUpdateRequest
from lightly.openapi_generated.swagger_client.models.docker_task_description import DockerTaskDescription
from lightly.openapi_generated.swagger_client.models.docker_user_stats import DockerUserStats
from lightly.openapi_generated.swagger_client.models.docker_worker_config import DockerWorkerConfig
from lightly.openapi_generated.swagger_client.models.docker_worker_config_create_request import DockerWorkerConfigCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_worker_config_data import DockerWorkerConfigData
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2 import DockerWorkerConfigV2
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_create_request import DockerWorkerConfigV2CreateRequest
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_data import DockerWorkerConfigV2Data
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_docker import DockerWorkerConfigV2Docker
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_docker_object_level import DockerWorkerConfigV2DockerObjectLevel
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_docker_stopping_condition import DockerWorkerConfigV2DockerStoppingCondition
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly import DockerWorkerConfigV2Lightly
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly_collate import DockerWorkerConfigV2LightlyCollate
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly_model import DockerWorkerConfigV2LightlyModel
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly_trainer import DockerWorkerConfigV2LightlyTrainer
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3 import DockerWorkerConfigV3
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_create_request import DockerWorkerConfigV3CreateRequest
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_data import DockerWorkerConfigV3Data
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker import DockerWorkerConfigV3Docker
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_corruptness_check import DockerWorkerConfigV3DockerCorruptnessCheck
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_datasource import DockerWorkerConfigV3DockerDatasource
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_training import DockerWorkerConfigV3DockerTraining
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly import DockerWorkerConfigV3Lightly
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_checkpoint_callback import DockerWorkerConfigV3LightlyCheckpointCallback
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_collate import DockerWorkerConfigV3LightlyCollate
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_criterion import DockerWorkerConfigV3LightlyCriterion
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_loader import DockerWorkerConfigV3LightlyLoader
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_model import DockerWorkerConfigV3LightlyModel
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_optimizer import DockerWorkerConfigV3LightlyOptimizer
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_trainer import DockerWorkerConfigV3LightlyTrainer
from lightly.openapi_generated.swagger_client.models.docker_worker_registry_entry_data import DockerWorkerRegistryEntryData
from lightly.openapi_generated.swagger_client.models.docker_worker_state import DockerWorkerState
from lightly.openapi_generated.swagger_client.models.docker_worker_type import DockerWorkerType
from lightly.openapi_generated.swagger_client.models.embedding2d_create_request import Embedding2dCreateRequest
from lightly.openapi_generated.swagger_client.models.embedding2d_data import Embedding2dData
from lightly.openapi_generated.swagger_client.models.embedding_data import EmbeddingData
from lightly.openapi_generated.swagger_client.models.file_name_format import FileNameFormat
from lightly.openapi_generated.swagger_client.models.file_output_format import FileOutputFormat
from lightly.openapi_generated.swagger_client.models.filename_and_read_url import FilenameAndReadUrl
from lightly.openapi_generated.swagger_client.models.image_type import ImageType
from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest
from lightly.openapi_generated.swagger_client.models.job_result_type import JobResultType
from lightly.openapi_generated.swagger_client.models.job_state import JobState
from lightly.openapi_generated.swagger_client.models.job_status_data import JobStatusData
from lightly.openapi_generated.swagger_client.models.job_status_data_result import JobStatusDataResult
from lightly.openapi_generated.swagger_client.models.job_status_meta import JobStatusMeta
from lightly.openapi_generated.swagger_client.models.job_status_upload_method import JobStatusUploadMethod
from lightly.openapi_generated.swagger_client.models.jobs_data import JobsData
from lightly.openapi_generated.swagger_client.models.label_box_data_row import LabelBoxDataRow
from lightly.openapi_generated.swagger_client.models.label_box_v4_data_row import LabelBoxV4DataRow
from lightly.openapi_generated.swagger_client.models.label_studio_task import LabelStudioTask
from lightly.openapi_generated.swagger_client.models.label_studio_task_data import LabelStudioTaskData
from lightly.openapi_generated.swagger_client.models.lightly_docker_selection_method import LightlyDockerSelectionMethod
from lightly.openapi_generated.swagger_client.models.lightly_model_v2 import LightlyModelV2
from lightly.openapi_generated.swagger_client.models.lightly_model_v3 import LightlyModelV3
from lightly.openapi_generated.swagger_client.models.lightly_trainer_precision_v2 import LightlyTrainerPrecisionV2
from lightly.openapi_generated.swagger_client.models.lightly_trainer_precision_v3 import LightlyTrainerPrecisionV3
from lightly.openapi_generated.swagger_client.models.prediction_singleton import PredictionSingleton
from lightly.openapi_generated.swagger_client.models.prediction_singleton_base import PredictionSingletonBase
from lightly.openapi_generated.swagger_client.models.prediction_singleton_classification import PredictionSingletonClassification
from lightly.openapi_generated.swagger_client.models.prediction_singleton_classification_all_of import PredictionSingletonClassificationAllOf
from lightly.openapi_generated.swagger_client.models.prediction_singleton_instance_segmentation import PredictionSingletonInstanceSegmentation
from lightly.openapi_generated.swagger_client.models.prediction_singleton_instance_segmentation_all_of import PredictionSingletonInstanceSegmentationAllOf
from lightly.openapi_generated.swagger_client.models.prediction_singleton_keypoint_detection import PredictionSingletonKeypointDetection
from lightly.openapi_generated.swagger_client.models.prediction_singleton_keypoint_detection_all_of import PredictionSingletonKeypointDetectionAllOf
from lightly.openapi_generated.swagger_client.models.prediction_singleton_object_detection import PredictionSingletonObjectDetection
from lightly.openapi_generated.swagger_client.models.prediction_singleton_object_detection_all_of import PredictionSingletonObjectDetectionAllOf
from lightly.openapi_generated.swagger_client.models.prediction_singleton_semantic_segmentation import PredictionSingletonSemanticSegmentation
from lightly.openapi_generated.swagger_client.models.prediction_singleton_semantic_segmentation_all_of import PredictionSingletonSemanticSegmentationAllOf
from lightly.openapi_generated.swagger_client.models.prediction_task_schema import PredictionTaskSchema
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_base import PredictionTaskSchemaBase
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_category import PredictionTaskSchemaCategory
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_category_keypoints import PredictionTaskSchemaCategoryKeypoints
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_category_keypoints_all_of import PredictionTaskSchemaCategoryKeypointsAllOf
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_keypoint import PredictionTaskSchemaKeypoint
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_keypoint_all_of import PredictionTaskSchemaKeypointAllOf
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_simple import PredictionTaskSchemaSimple
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_simple_all_of import PredictionTaskSchemaSimpleAllOf
from lightly.openapi_generated.swagger_client.models.prediction_task_schemas import PredictionTaskSchemas
from lightly.openapi_generated.swagger_client.models.questionnaire_data import QuestionnaireData
from lightly.openapi_generated.swagger_client.models.s3_region import S3Region
from lightly.openapi_generated.swagger_client.models.sama_task import SamaTask
from lightly.openapi_generated.swagger_client.models.sama_task_data import SamaTaskData
from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest
from lightly.openapi_generated.swagger_client.models.sample_data import SampleData
from lightly.openapi_generated.swagger_client.models.sample_data_modes import SampleDataModes
from lightly.openapi_generated.swagger_client.models.sample_meta_data import SampleMetaData
from lightly.openapi_generated.swagger_client.models.sample_partial_mode import SamplePartialMode
from lightly.openapi_generated.swagger_client.models.sample_sort_by import SampleSortBy
from lightly.openapi_generated.swagger_client.models.sample_type import SampleType
from lightly.openapi_generated.swagger_client.models.sample_update_request import SampleUpdateRequest
from lightly.openapi_generated.swagger_client.models.sample_write_urls import SampleWriteUrls
from lightly.openapi_generated.swagger_client.models.sampling_config import SamplingConfig
from lightly.openapi_generated.swagger_client.models.sampling_config_stopping_condition import SamplingConfigStoppingCondition
from lightly.openapi_generated.swagger_client.models.sampling_create_request import SamplingCreateRequest
from lightly.openapi_generated.swagger_client.models.sampling_method import SamplingMethod
from lightly.openapi_generated.swagger_client.models.sector import Sector
from lightly.openapi_generated.swagger_client.models.selection_config import SelectionConfig
from lightly.openapi_generated.swagger_client.models.selection_config_entry import SelectionConfigEntry
from lightly.openapi_generated.swagger_client.models.selection_config_entry_input import SelectionConfigEntryInput
from lightly.openapi_generated.swagger_client.models.selection_config_entry_strategy import SelectionConfigEntryStrategy
from lightly.openapi_generated.swagger_client.models.selection_input_predictions_name import SelectionInputPredictionsName
from lightly.openapi_generated.swagger_client.models.selection_input_type import SelectionInputType
from lightly.openapi_generated.swagger_client.models.selection_strategy_threshold_operation import SelectionStrategyThresholdOperation
from lightly.openapi_generated.swagger_client.models.selection_strategy_type import SelectionStrategyType
from lightly.openapi_generated.swagger_client.models.service_account_basic_data import ServiceAccountBasicData
from lightly.openapi_generated.swagger_client.models.set_embeddings_is_processed_flag_by_id_body_request import SetEmbeddingsIsProcessedFlagByIdBodyRequest
from lightly.openapi_generated.swagger_client.models.shared_access_config_create_request import SharedAccessConfigCreateRequest
from lightly.openapi_generated.swagger_client.models.shared_access_config_data import SharedAccessConfigData
from lightly.openapi_generated.swagger_client.models.shared_access_type import SharedAccessType
from lightly.openapi_generated.swagger_client.models.tag_active_learning_scores_data import TagActiveLearningScoresData
from lightly.openapi_generated.swagger_client.models.tag_arithmetics_operation import TagArithmeticsOperation
from lightly.openapi_generated.swagger_client.models.tag_arithmetics_request import TagArithmeticsRequest
from lightly.openapi_generated.swagger_client.models.tag_arithmetics_response import TagArithmeticsResponse
from lightly.openapi_generated.swagger_client.models.tag_bit_mask_response import TagBitMaskResponse
from lightly.openapi_generated.swagger_client.models.tag_change_data import TagChangeData
from lightly.openapi_generated.swagger_client.models.tag_change_data_arithmetics import TagChangeDataArithmetics
from lightly.openapi_generated.swagger_client.models.tag_change_data_initial import TagChangeDataInitial
from lightly.openapi_generated.swagger_client.models.tag_change_data_metadata import TagChangeDataMetadata
from lightly.openapi_generated.swagger_client.models.tag_change_data_operation_method import TagChangeDataOperationMethod
from lightly.openapi_generated.swagger_client.models.tag_change_data_rename import TagChangeDataRename
from lightly.openapi_generated.swagger_client.models.tag_change_data_sampler import TagChangeDataSampler
from lightly.openapi_generated.swagger_client.models.tag_change_data_samples import TagChangeDataSamples
from lightly.openapi_generated.swagger_client.models.tag_change_data_scatterplot import TagChangeDataScatterplot
from lightly.openapi_generated.swagger_client.models.tag_change_data_upsize import TagChangeDataUpsize
from lightly.openapi_generated.swagger_client.models.tag_change_entry import TagChangeEntry
from lightly.openapi_generated.swagger_client.models.tag_create_request import TagCreateRequest
from lightly.openapi_generated.swagger_client.models.tag_creator import TagCreator
from lightly.openapi_generated.swagger_client.models.tag_data import TagData
from lightly.openapi_generated.swagger_client.models.tag_update_request import TagUpdateRequest
from lightly.openapi_generated.swagger_client.models.tag_upsize_request import TagUpsizeRequest
from lightly.openapi_generated.swagger_client.models.task_type import TaskType
from lightly.openapi_generated.swagger_client.models.team_basic_data import TeamBasicData
from lightly.openapi_generated.swagger_client.models.team_data import TeamData
from lightly.openapi_generated.swagger_client.models.team_role import TeamRole
from lightly.openapi_generated.swagger_client.models.trigger2d_embedding_job_request import Trigger2dEmbeddingJobRequest
from lightly.openapi_generated.swagger_client.models.update_docker_worker_registry_entry_request import UpdateDockerWorkerRegistryEntryRequest
from lightly.openapi_generated.swagger_client.models.update_team_membership_request import UpdateTeamMembershipRequest
from lightly.openapi_generated.swagger_client.models.user_type import UserType
from lightly.openapi_generated.swagger_client.models.video_frame_data import VideoFrameData
from lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData
| 25,833 | 98.745174 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api_client.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import atexit
import datetime
from dateutil.parser import parse
import json
import mimetypes
from multiprocessing.pool import ThreadPool
import os
import re
import tempfile
from urllib.parse import quote
from lightly.openapi_generated.swagger_client.configuration import Configuration
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
import lightly.openapi_generated.swagger_client.models
from lightly.openapi_generated.swagger_client import rest
from lightly.openapi_generated.swagger_client.exceptions import ApiValueError, ApiException
class ApiClient(object):
    """Generic API client for OpenAPI client library builds.

    OpenAPI generic API client. This client handles the client-
    server communication, and is invariant across implementations. Specifics of
    the methods and models for each application are generated from the OpenAPI
    templates.

    :param configuration: .Configuration object for this client
    :param header_name: a header to pass when making calls to the API.
    :param header_value: a header value to pass when making calls to
        the API.
    :param cookie: a cookie to include in the header when making calls
        to the API
    :param pool_threads: The number of threads to use for async requests
        to the API. More threads means more concurrent API requests.
    """
    # Python types that pass through (de)serialization unchanged.
    PRIMITIVE_TYPES = (float, bool, bytes, str, int)
    # Maps OpenAPI type names to the Python types used to deserialize them
    # (consumed by __deserialize).
    NATIVE_TYPES_MAPPING = {
        'int': int,
        'long': int, # TODO remove as only py3 is supported?
        'float': float,
        'str': str,
        'bool': bool,
        'date': datetime.date,
        'datetime': datetime.datetime,
        'object': object,
    }
    # Thread pool for async requests; created lazily by the `pool` property.
    _pool = None
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None, pool_threads=1):
# use default configuration if none is provided
if configuration is None:
configuration = Configuration.get_default()
self.configuration = configuration
self.pool_threads = pool_threads
self.rest_client = rest.RESTClientObject(configuration)
self.default_headers = {}
if header_name is not None:
self.default_headers[header_name] = header_value
self.cookie = cookie
# Set default User-Agent.
self.user_agent = 'OpenAPI-Generator/1.0.0/python'
self.client_side_validation = configuration.client_side_validation
    def __enter__(self):
        # Context-manager entry: the client itself is the managed resource.
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Context-manager exit: release the thread pool and atexit hook.
        self.close()
def close(self):
if self._pool:
self._pool.close()
self._pool.join()
self._pool = None
if hasattr(atexit, 'unregister'):
atexit.unregister(self.close)
@property
def pool(self):
"""Create thread pool on first request
avoids instantiating unused threadpool for blocking clients.
"""
if self._pool is None:
atexit.register(self.close)
self._pool = ThreadPool(self.pool_threads)
return self._pool
    @property
    def user_agent(self):
        """The User-Agent header value sent with every request."""
        return self.default_headers['User-Agent']
    @user_agent.setter
    def user_agent(self, value):
        """Set the User-Agent header sent with every request."""
        self.default_headers['User-Agent'] = value
    def set_default_header(self, header_name, header_value):
        """Add or overwrite a header sent with every request."""
        self.default_headers[header_name] = header_value
_default = None
    @classmethod
    def get_default(cls):
        """Return the shared default ApiClient, creating it on first use.

        :return: The ApiClient object.
        """
        # Lazily construct the singleton with the default constructor.
        if cls._default is None:
            cls._default = ApiClient()
        return cls._default
    @classmethod
    def set_default(cls, default):
        """Replace the process-wide default ApiClient instance.

        :param default: object of ApiClient.
        """
        cls._default = default
    def __call_api(
            self, resource_path, method, path_params=None,
            query_params=None, header_params=None, body=None, post_params=None,
            files=None, response_types_map=None, auth_settings=None,
            _return_http_data_only=None, collection_formats=None,
            _preload_content=True, _request_timeout=None, _host=None,
            _request_auth=None):
        """Build, perform and post-process a single HTTP request.

        Synchronous worker behind `call_api`: serializes all parameter
        groups, applies authentication, issues the request through the
        REST client and deserializes the response according to
        ``response_types_map``. See `call_api` for parameter semantics.
        """
        config = self.configuration
        # header parameters: per-call headers merged with the client-wide
        # defaults (plus the session cookie, if any), then flattened
        header_params = header_params or {}
        header_params.update(self.default_headers)
        if self.cookie:
            header_params['Cookie'] = self.cookie
        if header_params:
            header_params = self.sanitize_for_serialization(header_params)
            header_params = dict(self.parameters_to_tuples(header_params,
                                                           collection_formats))
        # path parameters: substituted into '{placeholder}' segments,
        # percent-encoded
        if path_params:
            path_params = self.sanitize_for_serialization(path_params)
            path_params = self.parameters_to_tuples(path_params,
                                                    collection_formats)
            for k, v in path_params:
                # specified safe chars, encode everything
                resource_path = resource_path.replace(
                    '{%s}' % k,
                    quote(str(v), safe=config.safe_chars_for_path_param)
                )
        # post parameters (form fields plus multipart file uploads)
        if post_params or files:
            post_params = post_params if post_params else []
            post_params = self.sanitize_for_serialization(post_params)
            post_params = self.parameters_to_tuples(post_params,
                                                    collection_formats)
            post_params.extend(self.files_parameters(files))
        # auth setting
        self.update_params_for_auth(
            header_params, query_params, auth_settings,
            resource_path, method, body,
            request_auth=_request_auth)
        # body
        if body:
            body = self.sanitize_for_serialization(body)
        # request url
        if _host is None:
            url = self.configuration.host + resource_path
        else:
            # use server/host defined in path or operation instead
            url = _host + resource_path
        # query parameters
        if query_params:
            query_params = self.sanitize_for_serialization(query_params)
            url_query = self.parameters_to_url_query(query_params,
                                                     collection_formats)
            url += "?" + url_query
        try:
            # perform request and return response
            response_data = self.request(
                method, url,
                query_params=query_params,
                headers=header_params,
                post_params=post_params, body=body,
                _preload_content=_preload_content,
                _request_timeout=_request_timeout)
        except ApiException as e:
            # decode the error body so the exception message is readable
            if e.body:
                e.body = e.body.decode('utf-8')
            raise e
        self.last_response = response_data
        return_data = None  # assuming deserialization is not needed
        # data needs deserialization or returns HTTP data (deserialized) only
        if _preload_content or _return_http_data_only:
            response_type = response_types_map.get(str(response_data.status), None)
            if response_type == "bytearray":
                response_data.data = response_data.data
            else:
                # decode the body using the charset from the Content-Type
                # header, defaulting to UTF-8 when absent
                match = None
                content_type = response_data.getheader('content-type')
                if content_type is not None:
                    match = re.search(r"charset=([a-zA-Z\-\d]+)[\s;]?", content_type)
                encoding = match.group(1) if match else "utf-8"
                response_data.data = response_data.data.decode(encoding)
            # deserialize response data
            if response_type == "bytearray":
                return_data = response_data.data
            elif response_type:
                return_data = self.deserialize(response_data, response_type)
            else:
                return_data = None
        if _return_http_data_only:
            return return_data
        else:
            return ApiResponse(status_code = response_data.status,
                           data = return_data,
                           headers = response_data.getheaders(),
                           raw_data = response_data.data)
def sanitize_for_serialization(self, obj):
"""Builds a JSON POST object.
If obj is None, return None.
If obj is str, int, long, float, bool, return directly.
If obj is datetime.datetime, datetime.date
convert to string in iso8601 format.
If obj is list, sanitize each element in the list.
If obj is dict, return the dict.
If obj is OpenAPI model, return the properties dict.
:param obj: The data to serialize.
:return: The serialized form of data.
"""
if obj is None:
return None
elif isinstance(obj, self.PRIMITIVE_TYPES):
return obj
elif isinstance(obj, list):
return [self.sanitize_for_serialization(sub_obj)
for sub_obj in obj]
elif isinstance(obj, tuple):
return tuple(self.sanitize_for_serialization(sub_obj)
for sub_obj in obj)
elif isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, dict):
obj_dict = obj
else:
# Convert model obj to dict except
# attributes `openapi_types`, `attribute_map`
# and attributes which value is not None.
# Convert attribute name to json key in
# model definition for request.
obj_dict = obj.to_dict(by_alias=True)
return {key: self.sanitize_for_serialization(val)
for key, val in obj_dict.items()}
def deserialize(self, response, response_type):
"""Deserializes response into an object.
:param response: RESTResponse object to be deserialized.
:param response_type: class literal for
deserialized object, or string of class name.
:return: deserialized object.
"""
# handle file downloading
# save response body into a tmp file and return the instance
if response_type == "file":
return self.__deserialize_file(response)
# fetch data from response object
try:
data = json.loads(response.data)
except ValueError:
data = response.data
return self.__deserialize(data, response_type)
    def __deserialize(self, data, klass):
        """Deserializes dict, list, str into an object.

        :param data: dict, list or str.
        :param klass: class literal, or string of class name
            (e.g. ``"int"``, ``"List[TagData]"``, ``"Dict[str, int]"``).
        :return: object.
        """
        if data is None:
            return None
        if type(klass) == str:
            # Container types are encoded as "List[X]" / "Dict[K, V]"
            # strings; recurse element-wise with the inner type name.
            if klass.startswith('List['):
                sub_kls = re.match(r'List\[(.*)]', klass).group(1)
                return [self.__deserialize(sub_data, sub_kls)
                        for sub_data in data]
            if klass.startswith('Dict['):
                sub_kls = re.match(r'Dict\[([^,]*), (.*)]', klass).group(2)
                return {k: self.__deserialize(v, sub_kls)
                        for k, v in data.items()}
            # convert str to class
            if klass in self.NATIVE_TYPES_MAPPING:
                klass = self.NATIVE_TYPES_MAPPING[klass]
            else:
                # Any other name is looked up among the generated models.
                klass = getattr(lightly.openapi_generated.swagger_client.models, klass)
        if klass in self.PRIMITIVE_TYPES:
            return self.__deserialize_primitive(data, klass)
        elif klass == object:
            return self.__deserialize_object(data)
        elif klass == datetime.date:
            return self.__deserialize_date(data)
        elif klass == datetime.datetime:
            return self.__deserialize_datetime(data)
        else:
            return self.__deserialize_model(data, klass)
def call_api(self, resource_path, method,
path_params=None, query_params=None, header_params=None,
body=None, post_params=None, files=None,
response_types_map=None, auth_settings=None,
async_req=None, _return_http_data_only=None,
collection_formats=None, _preload_content=True,
_request_timeout=None, _host=None, _request_auth=None):
"""Makes the HTTP request (synchronous) and returns deserialized data.
To make an async_req request, set the async_req parameter.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be
placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
:param auth_settings list: Auth Settings names for the request.
:param response: Response data type.
:param files dict: key -> filename, value -> filepath,
for `multipart/form-data`.
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data instead of ApiResponse
object with status code, headers, etc
:param _preload_content: if False, the ApiResponse.data will
be set to none and raw_data will store the
HTTP response body without reading/decoding.
Default is True.
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_token: dict, optional
:return:
If async_req parameter is True,
the request will be called asynchronously.
The method will return the request thread.
If parameter async_req is False or missing,
then the method will return the response directly.
"""
if not async_req:
return self.__call_api(resource_path, method,
path_params, query_params, header_params,
body, post_params, files,
response_types_map, auth_settings,
_return_http_data_only, collection_formats,
_preload_content, _request_timeout, _host,
_request_auth)
return self.pool.apply_async(self.__call_api, (resource_path,
method, path_params,
query_params,
header_params, body,
post_params, files,
response_types_map,
auth_settings,
_return_http_data_only,
collection_formats,
_preload_content,
_request_timeout,
_host, _request_auth))
def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
"""Makes the HTTP request using RESTClient."""
if method == "GET":
return self.rest_client.get_request(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "HEAD":
return self.rest_client.head_request(url,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
headers=headers)
elif method == "OPTIONS":
return self.rest_client.options_request(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout)
elif method == "POST":
return self.rest_client.post_request(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PUT":
return self.rest_client.put_request(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "PATCH":
return self.rest_client.patch_request(url,
query_params=query_params,
headers=headers,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
elif method == "DELETE":
return self.rest_client.delete_request(url,
query_params=query_params,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
else:
raise ApiValueError(
"http method must be `GET`, `HEAD`, `OPTIONS`,"
" `POST`, `PATCH`, `PUT` or `DELETE`."
)
def parameters_to_tuples(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: Parameters as list of tuples, collections formatted
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(str(value) for value in v)))
else:
new_params.append((k, v))
return new_params
def parameters_to_url_query(self, params, collection_formats):
"""Get parameters as list of tuples, formatting collections.
:param params: Parameters as dict or list of two-tuples
:param dict collection_formats: Parameter collection formats
:return: URL query string (e.g. a=Hello%20World&b=123)
"""
new_params = []
if collection_formats is None:
collection_formats = {}
for k, v in params.items() if isinstance(params, dict) else params: # noqa: E501
if isinstance(v, (int, float)):
v = str(v)
if isinstance(v, bool):
v = str(v).lower()
if isinstance(v, dict):
v = json.dumps(v)
if k in collection_formats:
collection_format = collection_formats[k]
if collection_format == 'multi':
new_params.extend((k, value) for value in v)
else:
if collection_format == 'ssv':
delimiter = ' '
elif collection_format == 'tsv':
delimiter = '\t'
elif collection_format == 'pipes':
delimiter = '|'
else: # csv is the default
delimiter = ','
new_params.append(
(k, delimiter.join(quote(str(value)) for value in v)))
else:
new_params.append((k, quote(str(v))))
return "&".join(["=".join(item) for item in new_params])
def files_parameters(self, files=None):
"""Builds form parameters.
:param files: File parameters.
:return: Form parameters with files.
"""
params = []
if files:
for k, v in files.items():
if not v:
continue
file_names = v if type(v) is list else [v]
for n in file_names:
with open(n, 'rb') as f:
filename = os.path.basename(f.name)
filedata = f.read()
mimetype = (mimetypes.guess_type(filename)[0] or
'application/octet-stream')
params.append(
tuple([k, tuple([filename, filedata, mimetype])]))
return params
def select_header_accept(self, accepts):
"""Returns `Accept` based on an array of accepts provided.
:param accepts: List of headers.
:return: Accept (e.g. application/json).
"""
if not accepts:
return
for accept in accepts:
if re.search('json', accept, re.IGNORECASE):
return accept
return accepts[0]
def select_header_content_type(self, content_types):
"""Returns `Content-Type` based on an array of content_types provided.
:param content_types: List of content-types.
:return: Content-Type (e.g. application/json).
"""
if not content_types:
return None
for content_type in content_types:
if re.search('json', content_type, re.IGNORECASE):
return content_type
return content_types[0]
def update_params_for_auth(self, headers, queries, auth_settings,
resource_path, method, body,
request_auth=None):
"""Updates header and query params based on authentication setting.
:param headers: Header parameters dict to be updated.
:param queries: Query parameters tuple list to be updated.
:param auth_settings: Authentication setting identifiers list.
:resource_path: A string representation of the HTTP request resource path.
:method: A string representation of the HTTP request method.
:body: A object representing the body of the HTTP request.
The object type is the return value of sanitize_for_serialization().
:param request_auth: if set, the provided settings will
override the token in the configuration.
"""
if not auth_settings:
return
if request_auth:
self._apply_auth_params(headers, queries,
resource_path, method, body,
request_auth)
return
for auth in auth_settings:
auth_setting = self.configuration.auth_settings().get(auth)
if auth_setting:
self._apply_auth_params(headers, queries,
resource_path, method, body,
auth_setting)
def _apply_auth_params(self, headers, queries,
resource_path, method, body,
auth_setting):
"""Updates the request parameters based on a single auth_setting
:param headers: Header parameters dict to be updated.
:param queries: Query parameters tuple list to be updated.
:resource_path: A string representation of the HTTP request resource path.
:method: A string representation of the HTTP request method.
:body: A object representing the body of the HTTP request.
The object type is the return value of sanitize_for_serialization().
:param auth_setting: auth settings for the endpoint
"""
if auth_setting['in'] == 'cookie':
headers['Cookie'] = auth_setting['value']
elif auth_setting['in'] == 'header':
if auth_setting['type'] != 'http-signature':
headers[auth_setting['key']] = auth_setting['value']
elif auth_setting['in'] == 'query':
queries.append((auth_setting['key'], auth_setting['value']))
else:
raise ApiValueError(
'Authentication token must be in `query` or `header`'
)
def __deserialize_file(self, response):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
:param response: RESTResponse.
:return: file path.
"""
fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path)
os.close(fd)
os.remove(path)
content_disposition = response.getheader("Content-Disposition")
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
f.write(response.data)
return path
    def __deserialize_primitive(self, data, klass):
        """Deserializes string to primitive type.

        :param data: str.
        :param klass: class literal.
        :return: int, long, float, str, bool.
        """
        try:
            return klass(data)
        except UnicodeEncodeError:
            return str(data)
        except TypeError:
            # value not convertible to klass; hand it back unchanged
            return data
    def __deserialize_object(self, value):
        """Return an original value.

        "object"-typed responses are passed through untouched.

        :return: object.
        """
        return value
    def __deserialize_date(self, string):
        """Deserializes string to date.

        :param string: str.
        :return: date.
        """
        try:
            return parse(string).date()
        except ImportError:
            # NOTE(review): dead guard from the code generator - `parse` is
            # imported unconditionally at module level, so this branch
            # cannot trigger here.
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason="Failed to parse `{0}` as date object".format(string)
            )
    def __deserialize_datetime(self, string):
        """Deserializes string to datetime.

        The string should be in iso8601 datetime format.

        :param string: str.
        :return: datetime.
        """
        try:
            return parse(string)
        except ImportError:
            # NOTE(review): dead guard from the code generator - `parse` is
            # imported unconditionally at module level, so this branch
            # cannot trigger here.
            return string
        except ValueError:
            raise rest.ApiException(
                status=0,
                reason=(
                    "Failed to parse `{0}` as datetime object"
                    .format(string)
                )
            )
    def __deserialize_model(self, data, klass):
        """Deserializes list or dict to model.

        :param data: dict, list.
        :param klass: class literal.
        :return: model object.
        """
        # Generated model classes expose a `from_dict` constructor.
        return klass.from_dict(data)
| 30,347 | 39.089828 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api_response.py | """API response object."""
from __future__ import annotations
from typing import Any, Dict, Optional
from pydantic import Field, StrictInt, StrictStr
class ApiResponse:
    """
    API response object
    """

    # NOTE(review): this is a plain class, not a pydantic BaseModel, so the
    # Field(...) values below act as class-level metadata/defaults only and
    # are never validated - confirm this is intentional.
    status_code: Optional[StrictInt] = Field(None, description="HTTP status code")
    headers: Optional[Dict[StrictStr, StrictStr]] = Field(None, description="HTTP headers")
    data: Optional[Any] = Field(None, description="Deserialized data given the data type")
    raw_data: Optional[Any] = Field(None, description="Raw data (HTTP response body)")

    def __init__(self,
                 status_code=None,
                 headers=None,
                 data=None,
                 raw_data=None):
        """Store the pieces of a single HTTP response.

        :param status_code: HTTP status code of the response.
        :param headers: response headers as a dict.
        :param data: deserialized body (type depends on the endpoint).
        :param raw_data: raw (undecoded) HTTP response body.
        """
        self.status_code = status_code
        self.headers = headers
        self.data = data
        self.raw_data = raw_data
| 844 | 31.5 | 91 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/configuration.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import copy
import logging
import multiprocessing
import sys
import urllib3
import http.client as httplib
from lightly.openapi_generated.swagger_client.exceptions import ApiValueError
# JSON-Schema keywords whose validation can be toggled via
# `Configuration.client_side_validation` in the generated models.
JSON_SCHEMA_VALIDATION_KEYWORDS = {
    'multipleOf', 'maximum', 'exclusiveMaximum',
    'minimum', 'exclusiveMinimum', 'maxLength',
    'minLength', 'pattern', 'maxItems', 'minItems'
}
class Configuration(object):
    """This class contains various settings of the API client.

    :param host: Base url.
    :param api_key: Dict to store API key(s).
      Each entry in the dict specifies an API key.
      The dict key is the name of the security scheme in the OAS specification.
      The dict value is the API key secret.
    :param api_key_prefix: Dict to store API prefix (e.g. Bearer).
      The dict key is the name of the security scheme in the OAS specification.
      The dict value is an API key prefix when generating the auth data.
    :param username: Username for HTTP basic authentication.
    :param password: Password for HTTP basic authentication.
    :param access_token: Access token.
    :param server_index: Index to servers configuration.
    :param server_variables: Mapping with string values to replace variables in
      templated server configuration. The validation of enums is performed for
      variables with defined enum values before.
    :param server_operation_index: Mapping from operation ID to an index to server
      configuration.
    :param server_operation_variables: Mapping from operation ID to a mapping with
      string values to replace variables in templated server configuration.
      The validation of enums is performed for variables with defined enum values before.
    :param ssl_ca_cert: str - the path to a file of concatenated CA certificates
      in PEM format.

    :Example:

    API Key Authentication Example.
    Given the following security scheme in the OpenAPI specification:
      components:
        securitySchemes:
          cookieAuth:         # name for the security scheme
            type: apiKey
            in: cookie
            name: JSESSIONID  # cookie name

    You can programmatically set the cookie:

    conf = lightly.openapi_generated.swagger_client.Configuration(
        api_key={'cookieAuth': 'abc123'}
        api_key_prefix={'cookieAuth': 'JSESSIONID'}
    )

    The following cookie will be added to the HTTP request:
       Cookie: JSESSIONID abc123
    """

    _default = None

    def __init__(self, host=None,
                 api_key=None, api_key_prefix=None,
                 username=None, password=None,
                 access_token=None,
                 server_index=None, server_variables=None,
                 server_operation_index=None, server_operation_variables=None,
                 ssl_ca_cert=None,
                 ):
        """Constructor
        """
        self._base_path = "https://api.lightly.ai" if host is None else host
        """Default Base url
        """
        self.server_index = 0 if server_index is None and host is None else server_index
        self.server_operation_index = server_operation_index or {}
        """Default server index
        """
        self.server_variables = server_variables or {}
        self.server_operation_variables = server_operation_variables or {}
        """Default server variables
        """
        self.temp_folder_path = None
        """Temp file folder for downloading files
        """
        # Authentication Settings
        self.api_key = {}
        if api_key:
            self.api_key = api_key
        """dict to store API key(s)
        """
        self.api_key_prefix = {}
        if api_key_prefix:
            self.api_key_prefix = api_key_prefix
        """dict to store API prefix (e.g. Bearer)
        """
        self.refresh_api_key_hook = None
        """function hook to refresh API key if expired
        """
        self.username = username
        """Username for HTTP basic authentication
        """
        self.password = password
        """Password for HTTP basic authentication
        """
        # BUGFIX: the generated code assigned `self.access_token = None`
        # right after this line, silently discarding the constructor
        # argument so bearer auth configured here never took effect.
        self.access_token = access_token
        """Access token for OAuth/Bearer
        """
        self.logger = {}
        """Logging Settings
        """
        self.logger["package_logger"] = logging.getLogger("lightly.openapi_generated.swagger_client")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        """Log format
        """
        self.logger_stream_handler = None
        """Log stream handler
        """
        self.logger_file_handler = None
        """Log file handler
        """
        self.logger_file = None
        """Debug file location
        """
        self.debug = False
        """Debug switch
        """
        self.verify_ssl = True
        """SSL/TLS verification
           Set this to false to skip verifying SSL certificate when calling API
           from https server.
        """
        self.ssl_ca_cert = ssl_ca_cert
        """Set this to customize the certificate file to verify the peer.
        """
        self.cert_file = None
        """client certificate file
        """
        self.key_file = None
        """client key file
        """
        self.assert_hostname = None
        """Set this to True/False to enable/disable SSL hostname verification.
        """
        self.tls_server_name = None
        """SSL/TLS Server Name Indication (SNI)
           Set this to the SNI value expected by the server.
        """
        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
        """urllib3 connection pool's maximum number of connections saved
           per pool. urllib3 uses 1 connection as default value, but this is
           not the best value when you are making a lot of possibly parallel
           requests to the same host, which is often the case here.
           cpu_count * 5 is used as default value to increase performance.
        """
        self.proxy = None
        """Proxy URL
        """
        self.proxy_headers = None
        """Proxy headers
        """
        self.safe_chars_for_path_param = ''
        """Safe chars for path_param
        """
        self.retries = None
        """Adding retries to override urllib3 default value 3
        """
        # Enable client side validation
        self.client_side_validation = True
        self.socket_options = None
        """Options to pass down to the underlying urllib3 socket
        """
        self.datetime_format = "%Y-%m-%dT%H:%M:%S.%f%z"
        """datetime format
        """
        self.date_format = "%Y-%m-%d"
        """date format
        """

    def __deepcopy__(self, memo):
        """Deep-copy the configuration, sharing logger objects shallowly."""
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k not in ('logger', 'logger_file_handler', 'logger_stream_handler'):
                setattr(result, k, copy.deepcopy(v, memo))
        # shallow copy of loggers
        result.logger = copy.copy(self.logger)
        # use setters to configure loggers
        result.logger_file = self.logger_file
        result.debug = self.debug
        return result

    def __setattr__(self, name, value):
        object.__setattr__(self, name, value)

    @classmethod
    def set_default(cls, default):
        """Set default instance of configuration.

        It stores default configuration, which can be
        returned by get_default_copy method.

        :param default: object of Configuration
        """
        cls._default = default

    @classmethod
    def get_default_copy(cls):
        """Deprecated. Please use `get_default` instead.

        :return: The configuration object.
        """
        return cls.get_default()

    @classmethod
    def get_default(cls):
        """Return the default configuration.

        This method returns newly created, based on default constructor,
        object of Configuration class or returns a copy of default
        configuration.

        :return: The configuration object.
        """
        if cls._default is None:
            cls._default = Configuration()
        return cls._default

    @property
    def logger_file(self):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        return self.__logger_file

    @logger_file.setter
    def logger_file(self, value):
        """The logger file.

        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.

        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in self.logger.items():
                logger.addHandler(self.logger_file_handler)
                if self.logger_stream_handler:
                    logger.removeHandler(self.logger_stream_handler)
        else:
            # If not set logging file,
            # then add stream handler and remove file handler.
            self.logger_stream_handler = logging.StreamHandler()
            self.logger_stream_handler.setFormatter(self.logger_formatter)
            for _, logger in self.logger.items():
                logger.addHandler(self.logger_stream_handler)
                if self.logger_file_handler:
                    logger.removeHandler(self.logger_file_handler)

    @property
    def debug(self):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        return self.__debug

    @debug.setter
    def debug(self, value):
        """Debug status

        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in self.logger.items():
                logger.setLevel(logging.DEBUG)
            # turn on httplib debug
            httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in self.logger.items():
                logger.setLevel(logging.WARNING)
            # turn off httplib debug
            httplib.HTTPConnection.debuglevel = 0

    @property
    def logger_format(self):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        return self.__logger_format

    @logger_format.setter
    def logger_format(self, value):
        """The logger format.

        The logger_formatter will be updated when sets logger_format.

        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)

    def get_api_key_with_prefix(self, identifier, alias=None):
        """Gets API key (with prefix if set).

        :param identifier: The identifier of apiKey.
        :param alias: The alternative identifier of apiKey.
        :return: The token for api key authentication.
        """
        if self.refresh_api_key_hook is not None:
            self.refresh_api_key_hook(self)
        key = self.api_key.get(identifier, self.api_key.get(alias) if alias is not None else None)
        if key:
            prefix = self.api_key_prefix.get(identifier)
            if prefix:
                return "%s %s" % (prefix, key)
            else:
                return key

    def get_basic_auth_token(self):
        """Gets HTTP basic authentication header (string).

        :return: The token for basic HTTP authentication.
        """
        username = ""
        if self.username is not None:
            username = self.username
        password = ""
        if self.password is not None:
            password = self.password
        return urllib3.util.make_headers(
            basic_auth=username + ':' + password
        ).get('authorization')

    def auth_settings(self):
        """Gets Auth Settings dict for api client.

        :return: The Auth Settings information dict.
        """
        auth = {}
        if self.access_token is not None:
            auth['auth0Bearer'] = {
                'type': 'bearer',
                'in': 'header',
                'format': 'JWT',
                'key': 'Authorization',
                'value': 'Bearer ' + self.access_token
            }
        if 'ApiPublicJWTAuth' in self.api_key:
            auth['ApiPublicJWTAuth'] = {
                'type': 'api_key',
                'in': 'query',
                'key': 'publicToken',
                'value': self.get_api_key_with_prefix(
                    'ApiPublicJWTAuth',
                ),
            }
        if 'ApiKeyAuth' in self.api_key:
            auth['ApiKeyAuth'] = {
                'type': 'api_key',
                'in': 'query',
                'key': 'token',
                'value': self.get_api_key_with_prefix(
                    'ApiKeyAuth',
                ),
            }
        if 'InternalKeyAuth' in self.api_key:
            auth['InternalKeyAuth'] = {
                'type': 'api_key',
                'in': 'query',
                'key': 'secret',
                'value': self.get_api_key_with_prefix(
                    'InternalKeyAuth',
                ),
            }
        return auth

    def to_debug_report(self):
        """Gets the essential information for debugging.

        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: 1.0.0\n"\
               "SDK Package Version: 1.0.0".\
               format(env=sys.platform, pyversion=sys.version)

    def get_host_settings(self):
        """Gets an array of host settings

        :return: An array of host settings
        """
        return [
            {
                'url': "https://api.lightly.ai",
                'description': "No description provided",
            },
            {
                'url': "https://api-staging.lightly.ai",
                'description': "No description provided",
            },
            {
                'url': "https://api-dev.lightly.ai",
                'description': "No description provided",
            },
            {
                'url': "https://api.dev.lightly.ai",
                'description': "No description provided",
            },
            {
                'url': "http://localhost:5000",
                'description': "No description provided",
            }
        ]

    def get_host_from_settings(self, index, variables=None, servers=None):
        """Gets host URL based on the index and variables

        :param index: array index of the host settings
        :param variables: hash of variable and the corresponding value
        :param servers: an array of host settings or None
        :return: URL based on host settings
        """
        if index is None:
            return self._base_path

        variables = {} if variables is None else variables
        servers = self.get_host_settings() if servers is None else servers

        try:
            server = servers[index]
        except IndexError:
            raise ValueError(
                "Invalid index {0} when selecting the host settings. "
                "Must be less than {1}".format(index, len(servers)))

        url = server['url']

        # go through variables and replace placeholders
        for variable_name, variable in server.get('variables', {}).items():
            used_value = variables.get(
                variable_name, variable['default_value'])

            if 'enum_values' in variable \
                    and used_value not in variable['enum_values']:
                # BUGFIX: report `used_value` here; indexing
                # `variables[variable_name]` raised a KeyError when the
                # variable was absent and its default value was invalid.
                raise ValueError(
                    "The variable `{0}` in the host URL has invalid value "
                    "{1}. Must be {2}.".format(
                        variable_name, used_value,
                        variable['enum_values']))

            url = url.replace("{" + variable_name + "}", used_value)

        return url

    @property
    def host(self):
        """Return generated host."""
        return self.get_host_from_settings(self.server_index, variables=self.server_variables)

    @host.setter
    def host(self, value):
        """Fix base path."""
        self._base_path = value
        self.server_index = None
| 17,712 | 32.998081 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/exceptions.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
class OpenApiException(Exception):
    """Common base class for every exception raised by this OpenAPI client."""
class ApiTypeError(OpenApiException, TypeError):
    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None):
        """Raises an exception for TypeErrors

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): a list of keys an indices to get to the
                                 current_item
                                 None if unset
            valid_classes (tuple): the primitive classes that current item
                                   should be an instance of
                                   None if unset
            key_type (bool): False if our value is a value in a dict
                             True if it is a key in a dict
                             False if our item is an item in a list
                             None if unset
        """
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        # Append the rendered location only when a path was supplied.
        if path_to_item:
            message = "{0} at {1}".format(msg, render_path(path_to_item))
        else:
            message = msg
        super().__init__(message)
class ApiValueError(OpenApiException, ValueError):
    def __init__(self, msg, path_to_item=None):
        """Raised when a value fails validation.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (list): the path to the exception in the
                received_data dict. None if unset
        """
        self.path_to_item = path_to_item
        # Append the rendered location only when a path was supplied.
        if path_to_item:
            message = "{0} at {1}".format(msg, render_path(path_to_item))
        else:
            message = msg
        super().__init__(message)
class ApiAttributeError(OpenApiException, AttributeError):
    def __init__(self, msg, path_to_item=None):
        """Raised when an attribute reference or assignment fails.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): the path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        # Append the rendered location only when a path was supplied.
        if path_to_item:
            message = "{0} at {1}".format(msg, render_path(path_to_item))
        else:
            message = msg
        super().__init__(message)
class ApiKeyError(OpenApiException, KeyError):
    def __init__(self, msg, path_to_item=None):
        """Raised when a required key is missing.

        Args:
            msg (str): the exception message

        Keyword Args:
            path_to_item (None/list): the path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        # Append the rendered location only when a path was supplied.
        if path_to_item:
            message = "{0} at {1}".format(msg, render_path(path_to_item))
        else:
            message = msg
        super().__init__(message)
class ApiException(OpenApiException):
    """Raised when the server answers with a non-2xx HTTP status."""

    def __init__(self, status=None, reason=None, http_resp=None):
        # Prefer the live HTTP response when one is supplied; otherwise
        # fall back to the explicit status/reason arguments.
        if http_resp:
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()
        else:
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None

    def __str__(self):
        """Custom error messages for exception"""
        parts = ["({0})\n".format(self.status),
                 "Reason: {0}\n".format(self.reason)]
        if self.headers:
            parts.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            parts.append("HTTP response body: {0}\n".format(self.body))
        return "".join(parts)
class NotFoundException(ApiException):
    """ApiException specialization for HTTP 404 responses."""

    def __init__(self, status=None, reason=None, http_resp=None):
        super().__init__(status, reason, http_resp)
class UnauthorizedException(ApiException):
    """ApiException specialization for HTTP 401 responses."""

    def __init__(self, status=None, reason=None, http_resp=None):
        super().__init__(status, reason, http_resp)
class ForbiddenException(ApiException):
    """ApiException specialization for HTTP 403 responses."""

    def __init__(self, status=None, reason=None, http_resp=None):
        super().__init__(status, reason, http_resp)
class ServiceException(ApiException):
    """ApiException specialization for HTTP 5xx responses."""

    def __init__(self, status=None, reason=None, http_resp=None):
        super().__init__(status, reason, http_resp)
def render_path(path_to_item):
    """Return a bracketed string representation of *path_to_item*.

    Integers render as list indices (``[3]``), everything else as quoted
    dict keys (``['name']``).
    """
    pieces = []
    for part in path_to_item:
        if isinstance(part, int):
            pieces.append("[{0}]".format(part))
        else:
            pieces.append("['{0}']".format(part))
    return "".join(pieces)
| 5,292 | 31.27439 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/rest.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import io
import json
import logging
import re
import ssl
from urllib.parse import urlencode, quote_plus
import urllib3
from lightly.openapi_generated.swagger_client.exceptions import ApiException, UnauthorizedException, ForbiddenException, NotFoundException, ServiceException, ApiValueError
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
    """Read-only wrapper exposing a urllib3 response's status, reason,
    body and headers through a uniform interface."""

    def __init__(self, resp):
        self.urllib3_response = resp
        self.status = resp.status
        self.reason = resp.reason
        self.data = resp.data

    def getheaders(self):
        """Returns a dictionary of the response headers."""
        return self.urllib3_response.headers

    def getheader(self, name, default=None):
        """Returns a given response header."""
        headers = self.urllib3_response.headers
        return headers.get(name, default)
class RESTClientObject(object):
    """HTTP transport built on a urllib3 pool manager.

    Translates the generated client's calls into urllib3 requests,
    applying the TLS, proxy and retry settings from a Configuration.
    """
    def __init__(self, configuration, pools_size=4, maxsize=None):
        """Build the urllib3 pool manager from *configuration*.

        :param configuration: Configuration supplying TLS/proxy/retry settings.
        :param pools_size: number of connection pools to keep.
        :param maxsize: max parallel connections per host; defaults to
            configuration.connection_pool_maxsize, else 4.
        """
        # urllib3.PoolManager will pass all kw parameters to connectionpool
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
        # maxsize is the number of requests to host that are allowed in parallel # noqa: E501
        # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
        # cert_reqs
        if configuration.verify_ssl:
            cert_reqs = ssl.CERT_REQUIRED
        else:
            cert_reqs = ssl.CERT_NONE
        # Optional pool arguments are only forwarded when explicitly set.
        addition_pool_args = {}
        if configuration.assert_hostname is not None:
            addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
        if configuration.retries is not None:
            addition_pool_args['retries'] = configuration.retries
        if configuration.tls_server_name:
            addition_pool_args['server_hostname'] = configuration.tls_server_name
        if configuration.socket_options is not None:
            addition_pool_args['socket_options'] = configuration.socket_options
        if maxsize is None:
            if configuration.connection_pool_maxsize is not None:
                maxsize = configuration.connection_pool_maxsize
            else:
                maxsize = 4
        # https pool manager
        if configuration.proxy:
            self.pool_manager = urllib3.ProxyManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=configuration.ssl_ca_cert,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                proxy_url=configuration.proxy,
                proxy_headers=configuration.proxy_headers,
                **addition_pool_args
            )
        else:
            self.pool_manager = urllib3.PoolManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=configuration.ssl_ca_cert,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                **addition_pool_args
            )
    def request(self, method, url, query_params=None, headers=None,
                body=None, post_params=None, _preload_content=True,
                _request_timeout=None):
        """Perform requests.

        :param method: http request method
        :param url: http request url
        :param query_params: query parameters in the url
        :param headers: http request headers
        :param body: request json body, for `application/json`
        :param post_params: request post parameters,
                            `application/x-www-form-urlencoded`
                            and `multipart/form-data`
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: RESTResponse (or raw urllib3.HTTPResponse when
                 _preload_content is False).
        :raises ApiValueError: if both `body` and `post_params` are given.
        :raises ApiException: on non-2xx status or SSL failure; 401/403/404/5xx
                 raise the matching subclass.
        """
        method = method.upper()
        assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
                          'PATCH', 'OPTIONS']
        # `body` and `post_params` are mutually exclusive encodings.
        if post_params and body:
            raise ApiValueError(
                "body parameter cannot be used with post_params parameter."
            )
        post_params = post_params or {}
        headers = headers or {}
        # url already contains the URL query string
        # so reset query_params to empty dict
        query_params = {}
        timeout = None
        if _request_timeout:
            if isinstance(_request_timeout, (int,float)): # noqa: E501,F821
                timeout = urllib3.Timeout(total=_request_timeout)
            elif (isinstance(_request_timeout, tuple) and
                    len(_request_timeout) == 2):
                timeout = urllib3.Timeout(
                    connect=_request_timeout[0], read=_request_timeout[1])
        try:
            # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
            if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
                # no content type provided or payload is json
                if not headers.get('Content-Type') or re.search('json', headers['Content-Type'], re.IGNORECASE):
                    request_body = None
                    if body is not None:
                        request_body = json.dumps(body, allow_nan=False)
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'application/x-www-form-urlencoded':  # noqa: E501
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=False,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct
                    # Content-Type which generated by urllib3 will be
                    # overwritten.
                    del headers['Content-Type']
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=True,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                # Pass a `string` parameter directly in the body to support
                # other content types than Json when `body` argument is
                # provided in serialized form
                elif isinstance(body, str) or isinstance(body, bytes):
                    request_body = body
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                else:
                    # Cannot generate the request from given parameters
                    msg = """Cannot prepare a request message for provided
                             arguments. Please check that your arguments match
                             declared content type."""
                    raise ApiException(status=0, reason=msg)
            # For `GET`, `HEAD`
            else:
                r = self.pool_manager.request(method, url,
                                              fields={},
                                              preload_content=_preload_content,
                                              timeout=timeout,
                                              headers=headers)
        except urllib3.exceptions.SSLError as e:
            msg = "{0}\n{1}".format(type(e).__name__, str(e))
            raise ApiException(status=0, reason=msg)
        if _preload_content:
            r = RESTResponse(r)
            # log response body
            logger.debug("response body: %s", r.data)
        # Map error statuses onto the typed exception hierarchy.
        if not 200 <= r.status <= 299:
            if r.status == 401:
                raise UnauthorizedException(http_resp=r)
            if r.status == 403:
                raise ForbiddenException(http_resp=r)
            if r.status == 404:
                raise NotFoundException(http_resp=r)
            if 500 <= r.status <= 599:
                raise ServiceException(http_resp=r)
            raise ApiException(http_resp=r)
        return r
    def get_request(self, url, headers=None, query_params=None, _preload_content=True,
            _request_timeout=None):
        """Convenience wrapper for a GET request; see :meth:`request`."""
        return self.request("GET", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)
    def head_request(self, url, headers=None, query_params=None, _preload_content=True,
             _request_timeout=None):
        """Convenience wrapper for a HEAD request; see :meth:`request`."""
        return self.request("HEAD", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)
    def options_request(self, url, headers=None, query_params=None, post_params=None,
                body=None, _preload_content=True, _request_timeout=None):
        """Convenience wrapper for an OPTIONS request; see :meth:`request`."""
        return self.request("OPTIONS", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def delete_request(self, url, headers=None, query_params=None, body=None,
               _preload_content=True, _request_timeout=None):
        """Convenience wrapper for a DELETE request; see :meth:`request`."""
        return self.request("DELETE", url,
                            headers=headers,
                            query_params=query_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def post_request(self, url, headers=None, query_params=None, post_params=None,
             body=None, _preload_content=True, _request_timeout=None):
        """Convenience wrapper for a POST request; see :meth:`request`."""
        return self.request("POST", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def put_request(self, url, headers=None, query_params=None, post_params=None,
            body=None, _preload_content=True, _request_timeout=None):
        """Convenience wrapper for a PUT request; see :meth:`request`."""
        return self.request("PUT", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def patch_request(self, url, headers=None, query_params=None, post_params=None,
              body=None, _preload_content=True, _request_timeout=None):
        """Convenience wrapper for a PATCH request; see :meth:`request`."""
        return self.request("PATCH", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
| 12,981 | 41.986755 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/__init__.py | # flake8: noqa
# import apis into api package
from lightly.openapi_generated.swagger_client.api.collaboration_api import CollaborationApi
from lightly.openapi_generated.swagger_client.api.datasets_api import DatasetsApi
from lightly.openapi_generated.swagger_client.api.datasources_api import DatasourcesApi
from lightly.openapi_generated.swagger_client.api.docker_api import DockerApi
from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi
from lightly.openapi_generated.swagger_client.api.embeddings2d_api import Embeddings2dApi
from lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi
from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi
from lightly.openapi_generated.swagger_client.api.meta_data_configurations_api import MetaDataConfigurationsApi
from lightly.openapi_generated.swagger_client.api.predictions_api import PredictionsApi
from lightly.openapi_generated.swagger_client.api.quota_api import QuotaApi
from lightly.openapi_generated.swagger_client.api.samples_api import SamplesApi
from lightly.openapi_generated.swagger_client.api.samplings_api import SamplingsApi
from lightly.openapi_generated.swagger_client.api.scores_api import ScoresApi
from lightly.openapi_generated.swagger_client.api.tags_api import TagsApi
from lightly.openapi_generated.swagger_client.api.teams_api import TeamsApi
from lightly.openapi_generated.swagger_client.api.versioning_api import VersioningApi
| 1,474 | 66.045455 | 111 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/collaboration_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, constr, validator
from typing import List
from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse
from lightly.openapi_generated.swagger_client.models.shared_access_config_create_request import SharedAccessConfigCreateRequest
from lightly.openapi_generated.swagger_client.models.shared_access_config_data import SharedAccessConfigData
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class CollaborationApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    API surface for dataset collaboration: creating, listing and deleting
    shared-access configurations on a dataset.
    """
    def __init__(self, api_client=None):
        # Fall back to the library-wide default client when none is supplied.
        if api_client is None:
            api_client = ApiClient.get_default()
        self.api_client = api_client
    @validate_arguments
    def create_or_update_shared_access_config_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], shared_access_config_create_request : SharedAccessConfigCreateRequest, **kwargs) -> CreateEntityResponse:  # noqa: E501
        """create_or_update_shared_access_config_by_dataset_id # noqa: E501
        Create or update a shared access config. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_or_update_shared_access_config_by_dataset_id(dataset_id, shared_access_config_create_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param shared_access_config_create_request: (required)
        :type shared_access_config_create_request: SharedAccessConfigCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: CreateEntityResponse
        """
        # This convenience wrapper always unwraps the ApiResponse and returns
        # only the deserialized body; callers who need the raw response must
        # use the *_with_http_info variant instead.
        kwargs['_return_http_data_only'] = True
        if '_preload_content' in kwargs:
            raise ValueError("Error! Please call the create_or_update_shared_access_config_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
        return self.create_or_update_shared_access_config_by_dataset_id_with_http_info(dataset_id, shared_access_config_create_request, **kwargs)  # noqa: E501
    @validate_arguments
    def create_or_update_shared_access_config_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], shared_access_config_create_request : SharedAccessConfigCreateRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """create_or_update_shared_access_config_by_dataset_id # noqa: E501
        Create or update a shared access config. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_or_update_shared_access_config_by_dataset_id_with_http_info(dataset_id, shared_access_config_create_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param shared_access_config_create_request: (required)
        :type shared_access_config_create_request: SharedAccessConfigCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to None and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is captured first so _params holds exactly the bound call
        # arguments; no other local may be assigned before this line.
        _params = locals()
        _all_params = [
            'dataset_id',
            'shared_access_config_create_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown keyword arguments, then fold
        # the accepted ones into _params for uniform access below
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_or_update_shared_access_config_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['shared_access_config_create_request'] is not None:
            _body_params = _params['shared_access_config_create_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # set the HTTP header `Content-Type` (caller override wins)
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # maps HTTP status codes to the model class used for deserialization
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/collaboration/access', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
    @validate_arguments
    def delete_shared_access_config_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], access_config_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the shared access config.")], **kwargs) -> None:  # noqa: E501
        """delete_shared_access_config_by_id # noqa: E501
        Delete shared access config by id. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_shared_access_config_by_id(dataset_id, access_config_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param access_config_id: ObjectId of the shared access config. (required)
        :type access_config_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # This convenience wrapper always unwraps the ApiResponse and returns
        # only the deserialized body; callers who need the raw response must
        # use the *_with_http_info variant instead.
        kwargs['_return_http_data_only'] = True
        if '_preload_content' in kwargs:
            raise ValueError("Error! Please call the delete_shared_access_config_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
        return self.delete_shared_access_config_by_id_with_http_info(dataset_id, access_config_id, **kwargs)  # noqa: E501
    @validate_arguments
    def delete_shared_access_config_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], access_config_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the shared access config.")], **kwargs) -> ApiResponse:  # noqa: E501
        """delete_shared_access_config_by_id # noqa: E501
        Delete shared access config by id. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_shared_access_config_by_id_with_http_info(dataset_id, access_config_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param access_config_id: ObjectId of the shared access config. (required)
        :type access_config_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to None and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # locals() is captured first so _params holds exactly the bound call
        # arguments; no other local may be assigned before this line.
        _params = locals()
        _all_params = [
            'dataset_id',
            'access_config_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown keyword arguments, then fold
        # the accepted ones into _params for uniform access below
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_shared_access_config_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['access_config_id']:
            _path_params['accessConfigId'] = _params['access_config_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # empty map: DELETE returns no body to deserialize
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/collaboration/access/{accessConfigId}', 'DELETE',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
    @validate_arguments
    def get_shared_access_configs_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> List[SharedAccessConfigData]:  # noqa: E501
        """get_shared_access_configs_by_dataset_id # noqa: E501
        Get shared access configs by datasetId. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_shared_access_configs_by_dataset_id(dataset_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: List[SharedAccessConfigData]
        """
        # This convenience wrapper always unwraps the ApiResponse and returns
        # only the deserialized body; callers who need the raw response must
        # use the *_with_http_info variant instead.
        kwargs['_return_http_data_only'] = True
        if '_preload_content' in kwargs:
            raise ValueError("Error! Please call the get_shared_access_configs_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
        return self.get_shared_access_configs_by_dataset_id_with_http_info(dataset_id, **kwargs)  # noqa: E501
    @validate_arguments
    def get_shared_access_configs_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_shared_access_configs_by_dataset_id # noqa: E501
        Get shared access configs by datasetId. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_shared_access_configs_by_dataset_id_with_http_info(dataset_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to None and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[SharedAccessConfigData], status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is captured first so _params holds exactly the bound call
        # arguments; no other local may be assigned before this line.
        _params = locals()
        _all_params = [
            'dataset_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown keyword arguments, then fold
        # the accepted ones into _params for uniform access below
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_shared_access_configs_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # maps HTTP status codes to the model class used for deserialization
        _response_types_map = {
            '200': "List[SharedAccessConfigData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/collaboration/access', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 22,634 | 44.635081 | 314 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/datasets_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, StrictBool, StrictInt, conint, constr, validator
from typing import List, Optional
from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse
from lightly.openapi_generated.swagger_client.models.dataset_create_request import DatasetCreateRequest
from lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData
from lightly.openapi_generated.swagger_client.models.dataset_data_enriched import DatasetDataEnriched
from lightly.openapi_generated.swagger_client.models.dataset_update_request import DatasetUpdateRequest
from lightly.openapi_generated.swagger_client.models.job_status_meta import JobStatusMeta
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class DatasetsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def create_dataset(self, dataset_create_request : DatasetCreateRequest, **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_dataset # noqa: E501
Creates a new dataset for a user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_dataset(dataset_create_request, async_req=True)
>>> result = thread.get()
:param dataset_create_request: (required)
:type dataset_create_request: DatasetCreateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_dataset_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_dataset_with_http_info(dataset_create_request, **kwargs) # noqa: E501
@validate_arguments
    def create_dataset_with_http_info(self, dataset_create_request : DatasetCreateRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """create_dataset # noqa: E501
        Creates a new dataset for a user # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_dataset_with_http_info(dataset_create_request, async_req=True)
        >>> result = thread.get()
        :param dataset_create_request: (required)
        :type dataset_create_request: DatasetCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to None and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is captured first so _params holds exactly the bound call
        # arguments; no other local may be assigned before this line.
        _params = locals()
        _all_params = [
            'dataset_create_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown keyword arguments, then fold
        # the accepted ones into _params for uniform access below
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_dataset" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['dataset_create_request'] is not None:
            _body_params = _params['dataset_create_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # set the HTTP header `Content-Type` (caller override wins)
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # maps HTTP status codes to the model class used for deserialization
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def delete_dataset_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], force : Optional[StrictBool] = None, **kwargs) -> None: # noqa: E501
"""delete_dataset_by_id # noqa: E501
Delete a specific dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_dataset_by_id(dataset_id, force, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param force:
:type force: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the delete_dataset_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.delete_dataset_by_id_with_http_info(dataset_id, force, **kwargs) # noqa: E501
@validate_arguments
    def delete_dataset_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], force : Optional[StrictBool] = None, **kwargs) -> ApiResponse:  # noqa: E501
        """delete_dataset_by_id # noqa: E501
        Delete a specific dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_dataset_by_id_with_http_info(dataset_id, force, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param force:
        :type force: bool
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to None and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # locals() is captured first so _params holds exactly the bound call
        # arguments; no other local may be assigned before this line.
        _params = locals()
        _all_params = [
            'dataset_id',
            'force'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown keyword arguments, then fold
        # the accepted ones into _params for uniform access below
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_dataset_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        if _params.get('force') is not None:  # noqa: E501
            # enums are unwrapped via .value; plain values pass through
            _query_params.append((
                'force',
                _params['force'].value if hasattr(_params['force'], 'value') else _params['force']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # empty map: DELETE returns no body to deserialize
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}', 'DELETE',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_children_of_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> List[DatasetData]: # noqa: E501
"""get_children_of_dataset_id # noqa: E501
Get all datasets which are the children of a specific dataset (e.g crop datasets) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_children_of_dataset_id(dataset_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[DatasetData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_children_of_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_children_of_dataset_id_with_http_info(dataset_id, **kwargs) # noqa: E501
@validate_arguments
    def get_children_of_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_children_of_dataset_id # noqa: E501
        Get all datasets which are the children of a specific dataset (e.g crop datasets) # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_children_of_dataset_id_with_http_info(dataset_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to None and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[DatasetData], status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is captured first so _params holds exactly the bound call
        # arguments; no other local may be assigned before this line.
        _params = locals()
        _all_params = [
            'dataset_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown keyword arguments, then fold
        # the accepted ones into _params for uniform access below
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_children_of_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # maps HTTP status codes to the model class used for deserialization
        _response_types_map = {
            '200': "List[DatasetData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/children', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_dataset_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> DatasetData: # noqa: E501
"""get_dataset_by_id # noqa: E501
Get a specific dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_dataset_by_id(dataset_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DatasetData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_dataset_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_dataset_by_id_with_http_info(dataset_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_dataset_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_dataset_by_id  # noqa: E501

        Get a specific dataset  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_dataset_by_id_with_http_info(dataset_id, async_req=True)
        >>> result = thread.get()

        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(DatasetData, status_code(int), headers(HTTPHeaderDict))
        """

        # Snapshot the named parameters (plus the raw ``kwargs`` dict); every
        # request option below is looked up through this single ``_params`` dict.
        _params = locals()

        _all_params = [
            'dataset_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments: reject any kwarg that is neither an endpoint
        # parameter nor a recognized request option, then flatten the rest
        # into ``_params``.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_dataset_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']

        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "DatasetData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/datasets/{datasetId}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_datasets(self, shared : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which have been shared with the user")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[DatasetData]: # noqa: E501
"""get_datasets # noqa: E501
Get all datasets for a user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_datasets(shared, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, async_req=True)
>>> result = thread.get()
:param shared: if set, only returns the datasets which have been shared with the user
:type shared: bool
:param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
:type get_assets_of_team: bool
:param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
:type get_assets_of_team_inclusive_self: bool
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[DatasetData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_datasets_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_datasets_with_http_info(shared, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, **kwargs) # noqa: E501
    @validate_arguments
    def get_datasets_with_http_info(self, shared : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which have been shared with the user")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse:  # noqa: E501
        """get_datasets  # noqa: E501

        Get all datasets for a user  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_datasets_with_http_info(shared, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, async_req=True)
        >>> result = thread.get()

        :param shared: if set, only returns the datasets which have been shared with the user
        :type shared: bool
        :param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
        :type get_assets_of_team: bool
        :param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
        :type get_assets_of_team_inclusive_self: bool
        :param page_size: pagination size/limit of the number of samples to return
        :type page_size: int
        :param page_offset: pagination offset
        :type page_offset: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[DatasetData], status_code(int), headers(HTTPHeaderDict))
        """

        # Snapshot the named parameters (plus the raw ``kwargs`` dict); every
        # request option below is looked up through this single ``_params`` dict.
        _params = locals()

        _all_params = [
            'shared',
            'get_assets_of_team',
            'get_assets_of_team_inclusive_self',
            'page_size',
            'page_offset'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments: reject any kwarg that is neither an endpoint
        # parameter nor a recognized request option, then flatten the rest
        # into ``_params``.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_datasets" % _key
                )
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        _path_params = {}

        # process the query parameters: only parameters that were actually
        # supplied are sent; enum-like values are unwrapped via ``.value``.
        _query_params = []
        if _params.get('shared') is not None:  # noqa: E501
            _query_params.append((
                'shared',
                _params['shared'].value if hasattr(_params['shared'], 'value') else _params['shared']
            ))
        if _params.get('get_assets_of_team') is not None:  # noqa: E501
            _query_params.append((
                'getAssetsOfTeam',
                _params['get_assets_of_team'].value if hasattr(_params['get_assets_of_team'], 'value') else _params['get_assets_of_team']
            ))
        if _params.get('get_assets_of_team_inclusive_self') is not None:  # noqa: E501
            _query_params.append((
                'getAssetsOfTeamInclusiveSelf',
                _params['get_assets_of_team_inclusive_self'].value if hasattr(_params['get_assets_of_team_inclusive_self'], 'value') else _params['get_assets_of_team_inclusive_self']
            ))
        if _params.get('page_size') is not None:  # noqa: E501
            _query_params.append((
                'pageSize',
                _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
            ))
        if _params.get('page_offset') is not None:  # noqa: E501
            _query_params.append((
                'pageOffset',
                _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
            ))

        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "List[DatasetData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/datasets', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_datasets_enriched(self, shared : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which have been shared with the user")] = None, limit : Annotated[Optional[StrictInt], Field(description="DEPRECATED, use pageSize instead. if set, only returns the newest up until limit")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[DatasetDataEnriched]: # noqa: E501
"""get_datasets_enriched # noqa: E501
Get all datasets for a user but enriched with additional information as nTags, nEmbeddings, samples # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_datasets_enriched(shared, limit, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, async_req=True)
>>> result = thread.get()
:param shared: if set, only returns the datasets which have been shared with the user
:type shared: bool
:param limit: DEPRECATED, use pageSize instead. if set, only returns the newest up until limit
:type limit: int
:param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
:type get_assets_of_team: bool
:param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
:type get_assets_of_team_inclusive_self: bool
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[DatasetDataEnriched]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_datasets_enriched_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_datasets_enriched_with_http_info(shared, limit, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, **kwargs) # noqa: E501
    @validate_arguments
    def get_datasets_enriched_with_http_info(self, shared : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which have been shared with the user")] = None, limit : Annotated[Optional[StrictInt], Field(description="DEPRECATED, use pageSize instead. if set, only returns the newest up until limit")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse:  # noqa: E501
        """get_datasets_enriched  # noqa: E501

        Get all datasets for a user but enriched with additional information as nTags, nEmbeddings, samples  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_datasets_enriched_with_http_info(shared, limit, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, async_req=True)
        >>> result = thread.get()

        :param shared: if set, only returns the datasets which have been shared with the user
        :type shared: bool
        :param limit: DEPRECATED, use pageSize instead. if set, only returns the newest up until limit
        :type limit: int
        :param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
        :type get_assets_of_team: bool
        :param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
        :type get_assets_of_team_inclusive_self: bool
        :param page_size: pagination size/limit of the number of samples to return
        :type page_size: int
        :param page_offset: pagination offset
        :type page_offset: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[DatasetDataEnriched], status_code(int), headers(HTTPHeaderDict))
        """

        # Snapshot the named parameters (plus the raw ``kwargs`` dict); every
        # request option below is looked up through this single ``_params`` dict.
        _params = locals()

        _all_params = [
            'shared',
            'limit',
            'get_assets_of_team',
            'get_assets_of_team_inclusive_self',
            'page_size',
            'page_offset'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments: reject any kwarg that is neither an endpoint
        # parameter nor a recognized request option, then flatten the rest
        # into ``_params``.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_datasets_enriched" % _key
                )
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        _path_params = {}

        # process the query parameters: only parameters that were actually
        # supplied are sent; enum-like values are unwrapped via ``.value``.
        _query_params = []
        if _params.get('shared') is not None:  # noqa: E501
            _query_params.append((
                'shared',
                _params['shared'].value if hasattr(_params['shared'], 'value') else _params['shared']
            ))
        if _params.get('limit') is not None:  # noqa: E501
            _query_params.append((
                'limit',
                _params['limit'].value if hasattr(_params['limit'], 'value') else _params['limit']
            ))
        if _params.get('get_assets_of_team') is not None:  # noqa: E501
            _query_params.append((
                'getAssetsOfTeam',
                _params['get_assets_of_team'].value if hasattr(_params['get_assets_of_team'], 'value') else _params['get_assets_of_team']
            ))
        if _params.get('get_assets_of_team_inclusive_self') is not None:  # noqa: E501
            _query_params.append((
                'getAssetsOfTeamInclusiveSelf',
                _params['get_assets_of_team_inclusive_self'].value if hasattr(_params['get_assets_of_team_inclusive_self'], 'value') else _params['get_assets_of_team_inclusive_self']
            ))
        if _params.get('page_size') is not None:  # noqa: E501
            _query_params.append((
                'pageSize',
                _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
            ))
        if _params.get('page_offset') is not None:  # noqa: E501
            _query_params.append((
                'pageOffset',
                _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
            ))

        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "List[DatasetDataEnriched]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/datasets/enriched', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_datasets_enriched_query_by_name(self, dataset_name : constr(strict=True, min_length=1), shared : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which have been shared with the user")] = None, exact : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which match the name exactly (not just by prefix)")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[DatasetDataEnriched]: # noqa: E501
"""get_datasets_enriched_query_by_name # noqa: E501
Query for datasets enriched with additional information by their name prefix unless exact flag is set # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_datasets_enriched_query_by_name(dataset_name, shared, exact, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, async_req=True)
>>> result = thread.get()
:param dataset_name: (required)
:type dataset_name: str
:param shared: if set, only returns the datasets which have been shared with the user
:type shared: bool
:param exact: if set, only returns the datasets which match the name exactly (not just by prefix)
:type exact: bool
:param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
:type get_assets_of_team: bool
:param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
:type get_assets_of_team_inclusive_self: bool
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[DatasetDataEnriched]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_datasets_enriched_query_by_name_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_datasets_enriched_query_by_name_with_http_info(dataset_name, shared, exact, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, **kwargs) # noqa: E501
    @validate_arguments
    def get_datasets_enriched_query_by_name_with_http_info(self, dataset_name : constr(strict=True, min_length=1), shared : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which have been shared with the user")] = None, exact : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which match the name exactly (not just by prefix)")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse:  # noqa: E501
        """get_datasets_enriched_query_by_name  # noqa: E501

        Query for datasets enriched with additional information by their name prefix unless exact flag is set  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_datasets_enriched_query_by_name_with_http_info(dataset_name, shared, exact, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, async_req=True)
        >>> result = thread.get()

        :param dataset_name: (required)
        :type dataset_name: str
        :param shared: if set, only returns the datasets which have been shared with the user
        :type shared: bool
        :param exact: if set, only returns the datasets which match the name exactly (not just by prefix)
        :type exact: bool
        :param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
        :type get_assets_of_team: bool
        :param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
        :type get_assets_of_team_inclusive_self: bool
        :param page_size: pagination size/limit of the number of samples to return
        :type page_size: int
        :param page_offset: pagination offset
        :type page_offset: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[DatasetDataEnriched], status_code(int), headers(HTTPHeaderDict))
        """

        # Snapshot the named parameters (plus the raw ``kwargs`` dict); every
        # request option below is looked up through this single ``_params`` dict.
        _params = locals()

        _all_params = [
            'dataset_name',
            'shared',
            'exact',
            'get_assets_of_team',
            'get_assets_of_team_inclusive_self',
            'page_size',
            'page_offset'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments: reject any kwarg that is neither an endpoint
        # parameter nor a recognized request option, then flatten the rest
        # into ``_params``.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_datasets_enriched_query_by_name" % _key
                )
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        _path_params = {}
        if _params['dataset_name']:
            _path_params['datasetName'] = _params['dataset_name']

        # process the query parameters: only parameters that were actually
        # supplied are sent; enum-like values are unwrapped via ``.value``.
        _query_params = []
        if _params.get('shared') is not None:  # noqa: E501
            _query_params.append((
                'shared',
                _params['shared'].value if hasattr(_params['shared'], 'value') else _params['shared']
            ))
        if _params.get('exact') is not None:  # noqa: E501
            _query_params.append((
                'exact',
                _params['exact'].value if hasattr(_params['exact'], 'value') else _params['exact']
            ))
        if _params.get('get_assets_of_team') is not None:  # noqa: E501
            _query_params.append((
                'getAssetsOfTeam',
                _params['get_assets_of_team'].value if hasattr(_params['get_assets_of_team'], 'value') else _params['get_assets_of_team']
            ))
        if _params.get('get_assets_of_team_inclusive_self') is not None:  # noqa: E501
            _query_params.append((
                'getAssetsOfTeamInclusiveSelf',
                _params['get_assets_of_team_inclusive_self'].value if hasattr(_params['get_assets_of_team_inclusive_self'], 'value') else _params['get_assets_of_team_inclusive_self']
            ))
        if _params.get('page_size') is not None:  # noqa: E501
            _query_params.append((
                'pageSize',
                _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
            ))
        if _params.get('page_offset') is not None:  # noqa: E501
            _query_params.append((
                'pageOffset',
                _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
            ))

        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "List[DatasetDataEnriched]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/datasets/enriched/query/name/{datasetName}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_datasets_query_by_name(self, dataset_name : constr(strict=True, min_length=1), shared : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which have been shared with the user")] = None, exact : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which match the name exactly (not just by prefix)")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[DatasetData]: # noqa: E501
"""get_datasets_query_by_name # noqa: E501
Query for datasets by their name prefix unless exact flag is set # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_datasets_query_by_name(dataset_name, shared, exact, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, async_req=True)
>>> result = thread.get()
:param dataset_name: (required)
:type dataset_name: str
:param shared: if set, only returns the datasets which have been shared with the user
:type shared: bool
:param exact: if set, only returns the datasets which match the name exactly (not just by prefix)
:type exact: bool
:param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
:type get_assets_of_team: bool
:param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
:type get_assets_of_team_inclusive_self: bool
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[DatasetData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_datasets_query_by_name_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_datasets_query_by_name_with_http_info(dataset_name, shared, exact, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, **kwargs) # noqa: E501
    @validate_arguments
    def get_datasets_query_by_name_with_http_info(self, dataset_name : constr(strict=True, min_length=1), shared : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which have been shared with the user")] = None, exact : Annotated[Optional[StrictBool], Field(description="if set, only returns the datasets which match the name exactly (not just by prefix)")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse:  # noqa: E501
        """get_datasets_query_by_name  # noqa: E501
        Query for datasets by their name prefix unless exact flag is set  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_datasets_query_by_name_with_http_info(dataset_name, shared, exact, get_assets_of_team, get_assets_of_team_inclusive_self, page_size, page_offset, async_req=True)
        >>> result = thread.get()
        :param dataset_name: (required)
        :type dataset_name: str
        :param shared: if set, only returns the datasets which have been shared with the user
        :type shared: bool
        :param exact: if set, only returns the datasets which match the name exactly (not just by prefix)
        :type exact: bool
        :param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
        :type get_assets_of_team: bool
        :param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
        :type get_assets_of_team_inclusive_self: bool
        :param page_size: pagination size/limit of the number of samples to return
        :type page_size: int
        :param page_offset: pagination offset
        :type page_offset: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[DatasetData], status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot every named parameter (plus the `kwargs` dict) by name so
        # the generic request-assembly code below can look arguments up
        # uniformly via _params['...'].
        _params = locals()
        _all_params = [
            'dataset_name',
            'shared',
            'exact',
            'get_assets_of_team',
            'get_assets_of_team_inclusive_self',
            'page_size',
            'page_offset'
        ]
        # Generic per-request options accepted by every endpoint method.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown keyword arguments, then fold
        # the recognized ones into the _params snapshot.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_datasets_query_by_name" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_name']:
            _path_params['datasetName'] = _params['dataset_name']
        # process the query parameters; enum-valued arguments are unwrapped to
        # their raw `.value`, plain values are passed through unchanged.
        _query_params = []
        if _params.get('shared') is not None:  # noqa: E501
            _query_params.append((
                'shared',
                _params['shared'].value if hasattr(_params['shared'], 'value') else _params['shared']
            ))
        if _params.get('exact') is not None:  # noqa: E501
            _query_params.append((
                'exact',
                _params['exact'].value if hasattr(_params['exact'], 'value') else _params['exact']
            ))
        if _params.get('get_assets_of_team') is not None:  # noqa: E501
            _query_params.append((
                'getAssetsOfTeam',
                _params['get_assets_of_team'].value if hasattr(_params['get_assets_of_team'], 'value') else _params['get_assets_of_team']
            ))
        if _params.get('get_assets_of_team_inclusive_self') is not None:  # noqa: E501
            _query_params.append((
                'getAssetsOfTeamInclusiveSelf',
                _params['get_assets_of_team_inclusive_self'].value if hasattr(_params['get_assets_of_team_inclusive_self'], 'value') else _params['get_assets_of_team_inclusive_self']
            ))
        if _params.get('page_size') is not None:  # noqa: E501
            _query_params.append((
                'pageSize',
                _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
            ))
        if _params.get('page_offset') is not None:  # noqa: E501
            _query_params.append((
                'pageOffset',
                _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
            ))
        # process the header parameters (copy so the caller's dict is not mutated)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters (none for this endpoint)
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # Maps HTTP status codes to the model name used to deserialize the body.
        _response_types_map = {
            '200': "List[DatasetData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/query/name/{datasetName}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def register_dataset_upload_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], job_status_meta : JobStatusMeta, **kwargs) -> None: # noqa: E501
"""register_dataset_upload_by_id # noqa: E501
Registers a job to track the dataset upload # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.register_dataset_upload_by_id(dataset_id, job_status_meta, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param job_status_meta: (required)
:type job_status_meta: JobStatusMeta
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the register_dataset_upload_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.register_dataset_upload_by_id_with_http_info(dataset_id, job_status_meta, **kwargs) # noqa: E501
    @validate_arguments
    def register_dataset_upload_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], job_status_meta : JobStatusMeta, **kwargs) -> ApiResponse:  # noqa: E501
        """register_dataset_upload_by_id  # noqa: E501
        Registers a job to track the dataset upload  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.register_dataset_upload_by_id_with_http_info(dataset_id, job_status_meta, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param job_status_meta: (required)
        :type job_status_meta: JobStatusMeta
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # Snapshot every named parameter (plus the `kwargs` dict) by name so
        # the generic request-assembly code below can look arguments up
        # uniformly via _params['...'].
        _params = locals()
        _all_params = [
            'dataset_id',
            'job_status_meta'
        ]
        # Generic per-request options accepted by every endpoint method.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown keyword arguments, then fold
        # the recognized ones into the _params snapshot.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method register_dataset_upload_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters (copy so the caller's dict is not mutated)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters (none for this endpoint)
        _form_params = []
        _files = {}
        # process the body parameter: the JobStatusMeta payload is sent as the
        # JSON request body.
        _body_params = None
        if _params['job_status_meta'] is not None:
            _body_params = _params['job_status_meta']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # set the HTTP header `Content-Type` (caller override via _content_type
        # takes precedence over content negotiation)
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # Empty map: the endpoint returns no body to deserialize.
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/registerDatasetUpload', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def update_dataset_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], dataset_update_request : Annotated[DatasetUpdateRequest, Field(..., description="updated data for dataset")], **kwargs) -> None: # noqa: E501
"""update_dataset_by_id # noqa: E501
Update a specific dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_dataset_by_id(dataset_id, dataset_update_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param dataset_update_request: updated data for dataset (required)
:type dataset_update_request: DatasetUpdateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the update_dataset_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.update_dataset_by_id_with_http_info(dataset_id, dataset_update_request, **kwargs) # noqa: E501
    @validate_arguments
    def update_dataset_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], dataset_update_request : Annotated[DatasetUpdateRequest, Field(..., description="updated data for dataset")], **kwargs) -> ApiResponse:  # noqa: E501
        """update_dataset_by_id  # noqa: E501
        Update a specific dataset  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_dataset_by_id_with_http_info(dataset_id, dataset_update_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param dataset_update_request: updated data for dataset (required)
        :type dataset_update_request: DatasetUpdateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # Snapshot every named parameter (plus the `kwargs` dict) by name so
        # the generic request-assembly code below can look arguments up
        # uniformly via _params['...'].
        _params = locals()
        _all_params = [
            'dataset_id',
            'dataset_update_request'
        ]
        # Generic per-request options accepted by every endpoint method.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown keyword arguments, then fold
        # the recognized ones into the _params snapshot.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_dataset_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters (copy so the caller's dict is not mutated)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters (none for this endpoint)
        _form_params = []
        _files = {}
        # process the body parameter: the DatasetUpdateRequest payload is sent
        # as the JSON request body.
        _body_params = None
        if _params['dataset_update_request'] is not None:
            _body_params = _params['dataset_update_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # set the HTTP header `Content-Type` (caller override via _content_type
        # takes precedence over content negotiation)
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # Empty map: the endpoint returns no body to deserialize.
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
# --- file boundary: lightly/openapi_generated/swagger_client/api/datasources_api.py ---
# coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, StrictBool, StrictStr, conint, constr, validator
from typing import List, Optional
from lightly.openapi_generated.swagger_client.models.datasource_config import DatasourceConfig
from lightly.openapi_generated.swagger_client.models.datasource_config_verify_data import DatasourceConfigVerifyData
from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_request import DatasourceProcessedUntilTimestampRequest
from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_response import DatasourceProcessedUntilTimestampResponse
from lightly.openapi_generated.swagger_client.models.datasource_purpose import DatasourcePurpose
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data import DatasourceRawSamplesData
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_metadata_data import DatasourceRawSamplesMetadataData
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data import DatasourceRawSamplesPredictionsData
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class DatasourcesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def get_custom_embedding_file_read_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the csv file within the embeddings folder to get the readUrl for")], **kwargs) -> str: # noqa: E501
"""get_custom_embedding_file_read_url_from_datasource_by_dataset_id # noqa: E501
Get the ReadURL of a custom embedding csv file within the embeddings folder (e.g myCustomEmbedding.csv) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_custom_embedding_file_read_url_from_datasource_by_dataset_id(dataset_id, file_name, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param file_name: The name of the csv file within the embeddings folder to get the readUrl for (required)
:type file_name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_custom_embedding_file_read_url_from_datasource_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_custom_embedding_file_read_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, **kwargs) # noqa: E501
    @validate_arguments
    def get_custom_embedding_file_read_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the csv file within the embeddings folder to get the readUrl for")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_custom_embedding_file_read_url_from_datasource_by_dataset_id  # noqa: E501
        Get the ReadURL of a custom embedding csv file within the embeddings folder (e.g myCustomEmbedding.csv)  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_custom_embedding_file_read_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param file_name: The name of the csv file within the embeddings folder to get the readUrl for (required)
        :type file_name: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot every named parameter (plus the `kwargs` dict) by name so
        # the generic request-assembly code below can look arguments up
        # uniformly via _params['...'].
        _params = locals()
        _all_params = [
            'dataset_id',
            'file_name'
        ]
        # Generic per-request options accepted by every endpoint method.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown keyword arguments, then fold
        # the recognized ones into the _params snapshot.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_custom_embedding_file_read_url_from_datasource_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters; enum-valued arguments are unwrapped to
        # their raw `.value`, plain values are passed through unchanged.
        _query_params = []
        if _params.get('file_name') is not None:  # noqa: E501
            _query_params.append((
                'fileName',
                _params['file_name'].value if hasattr(_params['file_name'], 'value') else _params['file_name']
            ))
        # process the header parameters (copy so the caller's dict is not mutated)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters (none for this endpoint)
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # Maps HTTP status codes to the model name used to deserialize the body.
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/datasource/embeddings/file', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], purpose : Annotated[Optional[DatasourcePurpose], Field(description="Which datasource with which purpose we want to get. Defaults to INPUT_OUTPUT")] = None, **kwargs) -> DatasourceConfig: # noqa: E501
"""get_datasource_by_dataset_id # noqa: E501
Get the datasource of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_datasource_by_dataset_id(dataset_id, purpose, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param purpose: Which datasource with which purpose we want to get. Defaults to INPUT_OUTPUT
:type purpose: DatasourcePurpose
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DatasourceConfig
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_datasource_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_datasource_by_dataset_id_with_http_info(dataset_id, purpose, **kwargs) # noqa: E501
    @validate_arguments
    def get_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], purpose : Annotated[Optional[DatasourcePurpose], Field(description="Which datasource with which purpose we want to get. Defaults to INPUT_OUTPUT")] = None, **kwargs) -> ApiResponse:  # noqa: E501
        """get_datasource_by_dataset_id  # noqa: E501
        Get the datasource of a dataset  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_datasource_by_dataset_id_with_http_info(dataset_id, purpose, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param purpose: Which datasource with which purpose we want to get. Defaults to INPUT_OUTPUT
        :type purpose: DatasourcePurpose
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(DatasourceConfig, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must remain the FIRST statement of the body — it
        # snapshots exactly {self, dataset_id, purpose, kwargs}. Introducing any
        # local above this line would pollute the snapshot.
        _params = locals()
        # Endpoint-specific parameter names accepted by this method ...
        _all_params = [
            'dataset_id',
            'purpose'
        ]
        # ... plus the generic per-request options shared by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, then merge the known
        # ones into the _params snapshot so they can be read uniformly below.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_datasource_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters (dataset_id -> {datasetId} in the URL)
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters; enums are serialized via their .value
        _query_params = []
        if _params.get('purpose') is not None:  # noqa: E501
            _query_params.append((
                'purpose',
                _params['purpose'].value if hasattr(_params['purpose'], 'value') else _params['purpose']
            ))
        # process the header parameters (copy so the caller's dict isn't mutated)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "DatasourceConfig",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/datasource', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_datasource_processed_until_timestamp_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> DatasourceProcessedUntilTimestampResponse: # noqa: E501
"""get_datasource_processed_until_timestamp_by_dataset_id # noqa: E501
Get timestamp of last treated resource # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_datasource_processed_until_timestamp_by_dataset_id(dataset_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DatasourceProcessedUntilTimestampResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_datasource_processed_until_timestamp_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_datasource_processed_until_timestamp_by_dataset_id_with_http_info(dataset_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_datasource_processed_until_timestamp_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_datasource_processed_until_timestamp_by_dataset_id  # noqa: E501
        Get timestamp of last treated resource  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_datasource_processed_until_timestamp_by_dataset_id_with_http_info(dataset_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(DatasourceProcessedUntilTimestampResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must remain the FIRST statement of the body — it
        # snapshots exactly {self, dataset_id, kwargs}; any local defined above
        # this line would leak into the snapshot.
        _params = locals()
        # Endpoint-specific parameter names ...
        _all_params = [
            'dataset_id'
        ]
        # ... plus the generic per-request options shared by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones into
        # the _params snapshot for uniform access below.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_datasource_processed_until_timestamp_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters (dataset_id -> {datasetId} in the URL)
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters (copy so the caller's dict isn't mutated)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "DatasourceProcessedUntilTimestampResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/datasource/processedUntilTimestamp', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_datasources_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> List[DatasourceConfig]: # noqa: E501
"""get_datasources_by_dataset_id # noqa: E501
Get all the datasources of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_datasources_by_dataset_id(dataset_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[DatasourceConfig]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_datasources_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_datasources_by_dataset_id_with_http_info(dataset_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_datasources_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_datasources_by_dataset_id  # noqa: E501
        Get all the datasources of a dataset  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_datasources_by_dataset_id_with_http_info(dataset_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[DatasourceConfig], status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must remain the FIRST statement of the body — it
        # snapshots exactly {self, dataset_id, kwargs}; any local defined above
        # this line would leak into the snapshot.
        _params = locals()
        # Endpoint-specific parameter names ...
        _all_params = [
            'dataset_id'
        ]
        # ... plus the generic per-request options shared by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones into
        # the _params snapshot for uniform access below.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_datasources_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters (dataset_id -> {datasetId} in the URL)
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters (copy so the caller's dict isn't mutated)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "List[DatasourceConfig]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/datasource/all', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_list_of_raw_samples_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], var_from : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, to : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, cursor : Annotated[Optional[StrictStr], Field(description="Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list. ")] = None, use_redirected_read_url : Annotated[Optional[StrictBool], Field(description="By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file ")] = None, relevant_filenames_file_name : Annotated[Optional[constr(strict=True, min_length=4)], Field(description="The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details ")] = None, **kwargs) -> DatasourceRawSamplesData: # noqa: E501
"""get_list_of_raw_samples_from_datasource_by_dataset_id # noqa: E501
Get list of raw samples from datasource # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_of_raw_samples_from_datasource_by_dataset_id(dataset_id, var_from, to, cursor, use_redirected_read_url, relevant_filenames_file_name, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param var_from: Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified.
:type var_from: int
:param to: Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified.
:type to: int
:param cursor: Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list.
:type cursor: str
:param use_redirected_read_url: By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file
:type use_redirected_read_url: bool
:param relevant_filenames_file_name: The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details
:type relevant_filenames_file_name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DatasourceRawSamplesData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_list_of_raw_samples_from_datasource_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_list_of_raw_samples_from_datasource_by_dataset_id_with_http_info(dataset_id, var_from, to, cursor, use_redirected_read_url, relevant_filenames_file_name, **kwargs) # noqa: E501
    @validate_arguments
    def get_list_of_raw_samples_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], var_from : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, to : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, cursor : Annotated[Optional[StrictStr], Field(description="Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list. ")] = None, use_redirected_read_url : Annotated[Optional[StrictBool], Field(description="By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file ")] = None, relevant_filenames_file_name : Annotated[Optional[constr(strict=True, min_length=4)], Field(description="The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details ")] = None, **kwargs) -> ApiResponse:  # noqa: E501
        """get_list_of_raw_samples_from_datasource_by_dataset_id  # noqa: E501
        Get list of raw samples from datasource  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_list_of_raw_samples_from_datasource_by_dataset_id_with_http_info(dataset_id, var_from, to, cursor, use_redirected_read_url, relevant_filenames_file_name, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param var_from: Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified.
        :type var_from: int
        :param to: Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified.
        :type to: int
        :param cursor: Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list.
        :type cursor: str
        :param use_redirected_read_url: By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file
        :type use_redirected_read_url: bool
        :param relevant_filenames_file_name: The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details
        :type relevant_filenames_file_name: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(DatasourceRawSamplesData, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must remain the FIRST statement of the body — it
        # snapshots exactly the method's parameters (and kwargs); any local
        # defined above this line would leak into the snapshot.
        _params = locals()
        # Endpoint-specific parameter names. `var_from` is the Python-safe
        # alias for the reserved query-parameter name `from`.
        _all_params = [
            'dataset_id',
            'var_from',
            'to',
            'cursor',
            'use_redirected_read_url',
            'relevant_filenames_file_name'
        ]
        # ... plus the generic per-request options shared by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones into
        # the _params snapshot for uniform access below.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_list_of_raw_samples_from_datasource_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters (dataset_id -> {datasetId} in the URL)
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters; optional params are only sent when set,
        # and enums are serialized via their .value
        _query_params = []
        if _params.get('var_from') is not None:  # noqa: E501
            _query_params.append((
                'from',
                _params['var_from'].value if hasattr(_params['var_from'], 'value') else _params['var_from']
            ))
        if _params.get('to') is not None:  # noqa: E501
            _query_params.append((
                'to',
                _params['to'].value if hasattr(_params['to'], 'value') else _params['to']
            ))
        if _params.get('cursor') is not None:  # noqa: E501
            _query_params.append((
                'cursor',
                _params['cursor'].value if hasattr(_params['cursor'], 'value') else _params['cursor']
            ))
        if _params.get('use_redirected_read_url') is not None:  # noqa: E501
            _query_params.append((
                'useRedirectedReadUrl',
                _params['use_redirected_read_url'].value if hasattr(_params['use_redirected_read_url'], 'value') else _params['use_redirected_read_url']
            ))
        if _params.get('relevant_filenames_file_name') is not None:  # noqa: E501
            _query_params.append((
                'relevantFilenamesFileName',
                _params['relevant_filenames_file_name'].value if hasattr(_params['relevant_filenames_file_name'], 'value') else _params['relevant_filenames_file_name']
            ))
        # process the header parameters (copy so the caller's dict isn't mutated)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "DatasourceRawSamplesData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/datasource/list', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_list_of_raw_samples_metadata_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], var_from : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, to : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, cursor : Annotated[Optional[StrictStr], Field(description="Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list. ")] = None, use_redirected_read_url : Annotated[Optional[StrictBool], Field(description="By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file ")] = None, relevant_filenames_file_name : Annotated[Optional[constr(strict=True, min_length=4)], Field(description="The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details ")] = None, relevant_filenames_run_id : Annotated[Optional[constr(strict=True)], Field(description="The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam) ")] = None, relevant_filenames_artifact_id : Annotated[Optional[constr(strict=True)], Field(description="The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file. ")] = None, **kwargs) -> DatasourceRawSamplesMetadataData: # noqa: E501
"""get_list_of_raw_samples_metadata_from_datasource_by_dataset_id # noqa: E501
Get list of the raw samples metadata from datasource for a specific taskName # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_of_raw_samples_metadata_from_datasource_by_dataset_id(dataset_id, var_from, to, cursor, use_redirected_read_url, relevant_filenames_file_name, relevant_filenames_run_id, relevant_filenames_artifact_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param var_from: Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified.
:type var_from: int
:param to: Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified.
:type to: int
:param cursor: Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list.
:type cursor: str
:param use_redirected_read_url: By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file
:type use_redirected_read_url: bool
:param relevant_filenames_file_name: The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details
:type relevant_filenames_file_name: str
:param relevant_filenames_run_id: The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam)
:type relevant_filenames_run_id: str
:param relevant_filenames_artifact_id: The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file.
:type relevant_filenames_artifact_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DatasourceRawSamplesMetadataData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_list_of_raw_samples_metadata_from_datasource_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_list_of_raw_samples_metadata_from_datasource_by_dataset_id_with_http_info(dataset_id, var_from, to, cursor, use_redirected_read_url, relevant_filenames_file_name, relevant_filenames_run_id, relevant_filenames_artifact_id, **kwargs) # noqa: E501
@validate_arguments
def get_list_of_raw_samples_metadata_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], var_from : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, to : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, cursor : Annotated[Optional[StrictStr], Field(description="Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list. ")] = None, use_redirected_read_url : Annotated[Optional[StrictBool], Field(description="By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file ")] = None, relevant_filenames_file_name : Annotated[Optional[constr(strict=True, min_length=4)], Field(description="The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details ")] = None, relevant_filenames_run_id : Annotated[Optional[constr(strict=True)], Field(description="The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam) ")] = None, relevant_filenames_artifact_id : Annotated[Optional[constr(strict=True)], Field(description="The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file. ")] = None, **kwargs) -> ApiResponse:  # noqa: E501
    """get_list_of_raw_samples_metadata_from_datasource_by_dataset_id  # noqa: E501

    Get list of the raw samples metadata from datasource for a specific taskName  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_list_of_raw_samples_metadata_from_datasource_by_dataset_id_with_http_info(dataset_id, var_from, to, cursor, use_redirected_read_url, relevant_filenames_file_name, relevant_filenames_run_id, relevant_filenames_artifact_id, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param var_from: Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified.
    :type var_from: int
    :param to: Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified.
    :type to: int
    :param cursor: Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list.
    :type cursor: str
    :param use_redirected_read_url: By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file
    :type use_redirected_read_url: bool
    :param relevant_filenames_file_name: The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details
    :type relevant_filenames_file_name: str
    :param relevant_filenames_run_id: The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam)
    :type relevant_filenames_run_id: str
    :param relevant_filenames_artifact_id: The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file.
    :type relevant_filenames_artifact_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DatasourceRawSamplesMetadataData, status_code(int), headers(HTTPHeaderDict))
    """

    # NOTE: `locals()` is captured as the very first statement so that
    # `_params` contains exactly the declared parameters plus `kwargs`;
    # adding locals before this line would pollute the parameter map.
    _params = locals()

    # Names that may legally appear in `kwargs` or `_params`.
    _all_params = [
        'dataset_id',
        'var_from',
        'to',
        'cursor',
        'use_redirected_read_url',
        'relevant_filenames_file_name',
        'relevant_filenames_run_id',
        'relevant_filenames_artifact_id'
    ]
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown keyword arguments, then fold
    # the accepted ones into `_params` so they can be read uniformly below.
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_list_of_raw_samples_metadata_from_datasource_by_dataset_id" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}
    if _params['dataset_id']:
        _path_params['datasetId'] = _params['dataset_id']

    # process the query parameters; `.value` unwraps enum-like objects.
    # `var_from` is sent on the wire as `from` (a Python reserved word).
    _query_params = []
    if _params.get('var_from') is not None:  # noqa: E501
        _query_params.append((
            'from',
            _params['var_from'].value if hasattr(_params['var_from'], 'value') else _params['var_from']
        ))
    if _params.get('to') is not None:  # noqa: E501
        _query_params.append((
            'to',
            _params['to'].value if hasattr(_params['to'], 'value') else _params['to']
        ))
    if _params.get('cursor') is not None:  # noqa: E501
        _query_params.append((
            'cursor',
            _params['cursor'].value if hasattr(_params['cursor'], 'value') else _params['cursor']
        ))
    if _params.get('use_redirected_read_url') is not None:  # noqa: E501
        _query_params.append((
            'useRedirectedReadUrl',
            _params['use_redirected_read_url'].value if hasattr(_params['use_redirected_read_url'], 'value') else _params['use_redirected_read_url']
        ))
    if _params.get('relevant_filenames_file_name') is not None:  # noqa: E501
        _query_params.append((
            'relevantFilenamesFileName',
            _params['relevant_filenames_file_name'].value if hasattr(_params['relevant_filenames_file_name'], 'value') else _params['relevant_filenames_file_name']
        ))
    if _params.get('relevant_filenames_run_id') is not None:  # noqa: E501
        _query_params.append((
            'relevantFilenamesRunId',
            _params['relevant_filenames_run_id'].value if hasattr(_params['relevant_filenames_run_id'], 'value') else _params['relevant_filenames_run_id']
        ))
    if _params.get('relevant_filenames_artifact_id') is not None:  # noqa: E501
        _query_params.append((
            'relevantFilenamesArtifactId',
            _params['relevant_filenames_artifact_id'].value if hasattr(_params['relevant_filenames_artifact_id'], 'value') else _params['relevant_filenames_artifact_id']
        ))

    # process the header parameters
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters
    _form_params = []
    _files = {}

    # process the body parameter (GET request: no body)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # Map HTTP status codes to the model class used to deserialize them.
    _response_types_map = {
        '200': "DatasourceRawSamplesMetadataData",
        '400': "ApiErrorResponse",
        '401': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/datasource/metadata/list', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_list_of_raw_samples_predictions_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], var_from : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, to : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, cursor : Annotated[Optional[StrictStr], Field(description="Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list. ")] = None, use_redirected_read_url : Annotated[Optional[StrictBool], Field(description="By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file ")] = None, relevant_filenames_file_name : Annotated[Optional[constr(strict=True, min_length=4)], Field(description="The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details ")] = None, relevant_filenames_run_id : Annotated[Optional[constr(strict=True)], Field(description="The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam) ")] = None, relevant_filenames_artifact_id : Annotated[Optional[constr(strict=True)], Field(description="The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file. ")] = None, **kwargs) -> DatasourceRawSamplesPredictionsData:  # noqa: E501
    """get_list_of_raw_samples_predictions_from_datasource_by_dataset_id  # noqa: E501

    Get list of the raw samples predictions from datasource for a specific taskName  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_list_of_raw_samples_predictions_from_datasource_by_dataset_id(dataset_id, task_name, var_from, to, cursor, use_redirected_read_url, relevant_filenames_file_name, relevant_filenames_run_id, relevant_filenames_artifact_id, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param task_name: The prediction task name for which one wants to list the predictions (required)
    :type task_name: str
    :param var_from: Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified.
    :type var_from: int
    :param to: Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified.
    :type to: int
    :param cursor: Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list.
    :type cursor: str
    :param use_redirected_read_url: By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file
    :type use_redirected_read_url: bool
    :param relevant_filenames_file_name: The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details
    :type relevant_filenames_file_name: str
    :param relevant_filenames_run_id: The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam)
    :type relevant_filenames_run_id: str
    :param relevant_filenames_artifact_id: The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file.
    :type relevant_filenames_artifact_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: DatasourceRawSamplesPredictionsData
    """
    # FIX: the `relevant_filenames_run_id` Field description string was broken
    # across two physical lines by a raw newline (invalid inside a
    # double-quoted literal); it is rejoined here to match the one-line form
    # used by the sibling metadata endpoint.
    # This convenience variant always returns only the payload; callers that
    # need headers/status or raw bytes must use the *_with_http_info variant.
    kwargs['_return_http_data_only'] = True
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the get_list_of_raw_samples_predictions_from_datasource_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    return self.get_list_of_raw_samples_predictions_from_datasource_by_dataset_id_with_http_info(dataset_id, task_name, var_from, to, cursor, use_redirected_read_url, relevant_filenames_file_name, relevant_filenames_run_id, relevant_filenames_artifact_id, **kwargs)  # noqa: E501
@validate_arguments
def get_list_of_raw_samples_predictions_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], var_from : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, to : Annotated[Optional[conint(strict=True, ge=0)], Field(description="Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified. ")] = None, cursor : Annotated[Optional[StrictStr], Field(description="Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list. ")] = None, use_redirected_read_url : Annotated[Optional[StrictBool], Field(description="By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file ")] = None, relevant_filenames_file_name : Annotated[Optional[constr(strict=True, min_length=4)], Field(description="The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details ")] = None, relevant_filenames_run_id : Annotated[Optional[constr(strict=True)], Field(description="The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam) ")] = None, relevant_filenames_artifact_id : Annotated[Optional[constr(strict=True)], Field(description="The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file. ")] = None, **kwargs) -> ApiResponse:  # noqa: E501
    """get_list_of_raw_samples_predictions_from_datasource_by_dataset_id  # noqa: E501

    Get list of the raw samples predictions from datasource for a specific taskName  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_list_of_raw_samples_predictions_from_datasource_by_dataset_id_with_http_info(dataset_id, task_name, var_from, to, cursor, use_redirected_read_url, relevant_filenames_file_name, relevant_filenames_run_id, relevant_filenames_artifact_id, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param task_name: The prediction task name for which one wants to list the predictions (required)
    :type task_name: str
    :param var_from: Unix timestamp, only samples with a creation date after `from` will be returned. This parameter is ignored if `cursor` is specified.
    :type var_from: int
    :param to: Unix timestamp, only samples with a creation date before `to` will be returned. This parameter is ignored if `cursor` is specified.
    :type to: int
    :param cursor: Cursor from previous request, encodes `from` and `to` parameters. Specify to continue reading samples from the list.
    :type cursor: str
    :param use_redirected_read_url: By default this is set to false unless a S3DelegatedAccess is configured in which case its always true and this param has no effect. When true this will return RedirectedReadUrls instead of ReadUrls meaning that returned URLs allow for unlimited access to the file
    :type use_redirected_read_url: bool
    :param relevant_filenames_file_name: The name of the file within your datasource which contains a list of relevant filenames to list. See https://docs.lightly.ai/docker/getting_started/first_steps.html#specify-relevant-files for more details
    :type relevant_filenames_file_name: str
    :param relevant_filenames_run_id: The run id of the run which generated an artifact to be used as the relevant filenames file. (see DatasourceRelevantFilenamesArtifactIdParam)
    :type relevant_filenames_run_id: str
    :param relevant_filenames_artifact_id: The artifact id of the run provided by DatasourceRelevantFilenamesRunIdParam to be used as the relevant filenames file.
    :type relevant_filenames_artifact_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DatasourceRawSamplesPredictionsData, status_code(int), headers(HTTPHeaderDict))
    """
    # FIX: the `relevant_filenames_run_id` Field description string was broken
    # across two physical lines by a raw newline (invalid inside a
    # double-quoted literal); it is rejoined here to match the one-line form
    # used by the sibling metadata endpoint.

    # NOTE: `locals()` is captured first so `_params` contains exactly the
    # declared parameters plus `kwargs`.
    _params = locals()

    # Names that may legally appear in `kwargs` or `_params`.
    _all_params = [
        'dataset_id',
        'task_name',
        'var_from',
        'to',
        'cursor',
        'use_redirected_read_url',
        'relevant_filenames_file_name',
        'relevant_filenames_run_id',
        'relevant_filenames_artifact_id'
    ]
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown keyword arguments
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_list_of_raw_samples_predictions_from_datasource_by_dataset_id" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}
    if _params['dataset_id']:
        _path_params['datasetId'] = _params['dataset_id']

    # process the query parameters; `.value` unwraps enum-like objects.
    # `var_from` is sent on the wire as `from` (a Python reserved word).
    _query_params = []
    if _params.get('task_name') is not None:  # noqa: E501
        _query_params.append((
            'taskName',
            _params['task_name'].value if hasattr(_params['task_name'], 'value') else _params['task_name']
        ))
    if _params.get('var_from') is not None:  # noqa: E501
        _query_params.append((
            'from',
            _params['var_from'].value if hasattr(_params['var_from'], 'value') else _params['var_from']
        ))
    if _params.get('to') is not None:  # noqa: E501
        _query_params.append((
            'to',
            _params['to'].value if hasattr(_params['to'], 'value') else _params['to']
        ))
    if _params.get('cursor') is not None:  # noqa: E501
        _query_params.append((
            'cursor',
            _params['cursor'].value if hasattr(_params['cursor'], 'value') else _params['cursor']
        ))
    if _params.get('use_redirected_read_url') is not None:  # noqa: E501
        _query_params.append((
            'useRedirectedReadUrl',
            _params['use_redirected_read_url'].value if hasattr(_params['use_redirected_read_url'], 'value') else _params['use_redirected_read_url']
        ))
    if _params.get('relevant_filenames_file_name') is not None:  # noqa: E501
        _query_params.append((
            'relevantFilenamesFileName',
            _params['relevant_filenames_file_name'].value if hasattr(_params['relevant_filenames_file_name'], 'value') else _params['relevant_filenames_file_name']
        ))
    if _params.get('relevant_filenames_run_id') is not None:  # noqa: E501
        _query_params.append((
            'relevantFilenamesRunId',
            _params['relevant_filenames_run_id'].value if hasattr(_params['relevant_filenames_run_id'], 'value') else _params['relevant_filenames_run_id']
        ))
    if _params.get('relevant_filenames_artifact_id') is not None:  # noqa: E501
        _query_params.append((
            'relevantFilenamesArtifactId',
            _params['relevant_filenames_artifact_id'].value if hasattr(_params['relevant_filenames_artifact_id'], 'value') else _params['relevant_filenames_artifact_id']
        ))

    # process the header parameters
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters
    _form_params = []
    _files = {}

    # process the body parameter (GET request: no body)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # Map HTTP status codes to the model class used to deserialize them.
    _response_types_map = {
        '200': "DatasourceRawSamplesPredictionsData",
        '400': "ApiErrorResponse",
        '401': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/datasource/predictions/list', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_metadata_file_read_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=5), Field(..., description="The name of the file within the metadata folder to get the readUrl for")], **kwargs) -> str:  # noqa: E501
    """get_metadata_file_read_url_from_datasource_by_dataset_id  # noqa: E501

    Get the ReadURL of a file within the metadata folder (e.g. my_image.json or
    my_video-099-mp4.json)  # noqa: E501

    Convenience wrapper around
    :meth:`get_metadata_file_read_url_from_datasource_by_dataset_id_with_http_info`
    that returns only the response payload. The request is synchronous by
    default; pass ``async_req=True`` to receive a request thread instead.

    >>> thread = api.get_metadata_file_read_url_from_datasource_by_dataset_id(dataset_id, file_name, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param file_name: The name of the file within the metadata folder to get the readUrl for (required)
    :type file_name: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: str
    """
    # Raw-content access is only meaningful on the *_with_http_info variant,
    # so reject `_preload_content` here up front.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the get_metadata_file_read_url_from_datasource_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # This wrapper always strips the HTTP envelope from the response.
    kwargs['_return_http_data_only'] = True
    return self.get_metadata_file_read_url_from_datasource_by_dataset_id_with_http_info(
        dataset_id, file_name, **kwargs)  # noqa: E501
@validate_arguments
def get_metadata_file_read_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=5), Field(..., description="The name of the file within the metadata folder to get the readUrl for")], **kwargs) -> ApiResponse:  # noqa: E501
    """get_metadata_file_read_url_from_datasource_by_dataset_id  # noqa: E501

    Get the ReadURL of a file within the metadata folder (e.g. my_image.json or my_video-099-mp4.json)  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_metadata_file_read_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param file_name: The name of the file within the metadata folder to get the readUrl for (required)
    :type file_name: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
    """

    # NOTE: `locals()` is captured as the very first statement so that
    # `_params` contains exactly the declared parameters plus `kwargs`.
    _params = locals()

    # Names that may legally appear in `kwargs` or `_params`.
    _all_params = [
        'dataset_id',
        'file_name'
    ]
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown keyword arguments
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_metadata_file_read_url_from_datasource_by_dataset_id" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}
    if _params['dataset_id']:
        _path_params['datasetId'] = _params['dataset_id']

    # process the query parameters; `.value` unwraps enum-like objects.
    _query_params = []
    if _params.get('file_name') is not None:  # noqa: E501
        _query_params.append((
            'fileName',
            _params['file_name'].value if hasattr(_params['file_name'], 'value') else _params['file_name']
        ))

    # process the header parameters
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters
    _form_params = []
    _files = {}

    # process the body parameter (GET request: no body)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # Map HTTP status codes to the model class used to deserialize them.
    _response_types_map = {
        '200': "str",
        '400': "ApiErrorResponse",
        '401': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/datasource/metadata/file', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_prediction_file_read_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the readUrl for")], **kwargs) -> str:  # noqa: E501
    """get_prediction_file_read_url_from_datasource_by_dataset_id  # noqa: E501

    Get the ReadURL of a file within the predictions folder (e.g tasks.json or
    my_classification_task/schema.json)  # noqa: E501

    Convenience wrapper around
    :meth:`get_prediction_file_read_url_from_datasource_by_dataset_id_with_http_info`
    that returns only the response payload. The request is synchronous by
    default; pass ``async_req=True`` to receive a request thread instead.

    >>> thread = api.get_prediction_file_read_url_from_datasource_by_dataset_id(dataset_id, file_name, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param file_name: The name of the file within the prediction folder to get the readUrl for (required)
    :type file_name: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: str
    """
    # Raw-content access is only meaningful on the *_with_http_info variant,
    # so reject `_preload_content` here up front.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the get_prediction_file_read_url_from_datasource_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # This wrapper always strips the HTTP envelope from the response.
    kwargs['_return_http_data_only'] = True
    return self.get_prediction_file_read_url_from_datasource_by_dataset_id_with_http_info(
        dataset_id, file_name, **kwargs)  # noqa: E501
    @validate_arguments
    def get_prediction_file_read_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the readUrl for")], **kwargs) -> ApiResponse: # noqa: E501
        """get_prediction_file_read_url_from_datasource_by_dataset_id # noqa: E501
        Get the ReadURL of a file within the predictions folder (e.g tasks.json or my_classification_task/schema.json) # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_prediction_file_read_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param file_name: The name of the file within the prediction folder to get the readUrl for (required)
        :type file_name: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is captured before any helper variable is defined so that
        # _params holds exactly the validated arguments plus the raw 'kwargs'.
        _params = locals()
        _all_params = [
            'dataset_id',
            'file_name'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_prediction_file_read_url_from_datasource_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        if _params.get('file_name') is not None: # noqa: E501
            # enum-like arguments are unwrapped to their raw .value
            _query_params.append((
                'fileName',
                _params['file_name'].value if hasattr(_params['file_name'], 'value') else _params['file_name']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # maps HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/datasource/predictions/file', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_prediction_file_write_url_from_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the readUrl for")], **kwargs) -> str: # noqa: E501
"""get_prediction_file_write_url_from_datasource_by_dataset_id # noqa: E501
Get the WriteURL of a file within the predictions folder (e.g tasks.json or my_classification_task/schema.json) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_prediction_file_write_url_from_datasource_by_dataset_id(dataset_id, file_name, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param file_name: The name of the file within the prediction folder to get the readUrl for (required)
:type file_name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_prediction_file_write_url_from_datasource_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_prediction_file_write_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, **kwargs) # noqa: E501
    @validate_arguments
    def get_prediction_file_write_url_from_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[constr(strict=True, min_length=4), Field(..., description="The name of the file within the prediction folder to get the readUrl for")], **kwargs) -> ApiResponse: # noqa: E501
        """get_prediction_file_write_url_from_datasource_by_dataset_id # noqa: E501
        Get the WriteURL of a file within the predictions folder (e.g tasks.json or my_classification_task/schema.json) # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_prediction_file_write_url_from_datasource_by_dataset_id_with_http_info(dataset_id, file_name, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param file_name: The name of the file within the prediction folder to get the readUrl for (required)
        :type file_name: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is captured before any helper variable is defined so that
        # _params holds exactly the validated arguments plus the raw 'kwargs'.
        _params = locals()
        _all_params = [
            'dataset_id',
            'file_name'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_prediction_file_write_url_from_datasource_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        if _params.get('file_name') is not None: # noqa: E501
            # enum-like arguments are unwrapped to their raw .value
            _query_params.append((
                'fileName',
                _params['file_name'].value if hasattr(_params['file_name'], 'value') else _params['file_name']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # maps HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/datasource/predictions/writeUrl', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_resource_read_url_redirect(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], path : Annotated[StrictStr, Field(..., description="the resource path")], **kwargs) -> None: # noqa: E501
"""get_resource_read_url_redirect # noqa: E501
This endpoint enables anyone given the correct credentials to access the actual image directly via a redirect. By creating a readURL for the resource and redirecting to that URL, the client can use this endpoint to always have a way to access the resource as there is no expiration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_resource_read_url_redirect(dataset_id, path, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param path: the resource path (required)
:type path: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_resource_read_url_redirect_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_resource_read_url_redirect_with_http_info(dataset_id, path, **kwargs) # noqa: E501
    @validate_arguments
    def get_resource_read_url_redirect_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], path : Annotated[StrictStr, Field(..., description="the resource path")], **kwargs) -> ApiResponse: # noqa: E501
        """get_resource_read_url_redirect # noqa: E501
        This endpoint enables anyone given the correct credentials to access the actual image directly via a redirect. By creating a readURL for the resource and redirecting to that URL, the client can use this endpoint to always have a way to access the resource as there is no expiration # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_resource_read_url_redirect_with_http_info(dataset_id, path, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param path: the resource path (required)
        :type path: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # locals() is captured before any helper variable is defined so that
        # _params holds exactly the validated arguments plus the raw 'kwargs'.
        _params = locals()
        _all_params = [
            'dataset_id',
            'path'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_resource_read_url_redirect" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        if _params.get('path') is not None: # noqa: E501
            # enum-like arguments are unwrapped to their raw .value
            _query_params.append((
                'path',
                _params['path'].value if hasattr(_params['path'], 'value') else _params['path']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting (JWT-based, unlike the other endpoints here)
        _auth_settings = ['ApiPublicJWTAuth'] # noqa: E501
        # empty map: no response body is deserialized (redirect endpoint)
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/datasource/readurlRedirect', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def update_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], datasource_config : Annotated[DatasourceConfig, Field(..., description="updated datasource configuration for a dataset")], **kwargs) -> None: # noqa: E501
"""update_datasource_by_dataset_id # noqa: E501
Update the datasource of a specific dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_datasource_by_dataset_id(dataset_id, datasource_config, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param datasource_config: updated datasource configuration for a dataset (required)
:type datasource_config: DatasourceConfig
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the update_datasource_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.update_datasource_by_dataset_id_with_http_info(dataset_id, datasource_config, **kwargs) # noqa: E501
    @validate_arguments
    def update_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], datasource_config : Annotated[DatasourceConfig, Field(..., description="updated datasource configuration for a dataset")], **kwargs) -> ApiResponse: # noqa: E501
        """update_datasource_by_dataset_id # noqa: E501
        Update the datasource of a specific dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_datasource_by_dataset_id_with_http_info(dataset_id, datasource_config, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param datasource_config: updated datasource configuration for a dataset (required)
        :type datasource_config: DatasourceConfig
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # locals() is captured before any helper variable is defined so that
        # _params holds exactly the validated arguments plus the raw 'kwargs'.
        _params = locals()
        _all_params = [
            'dataset_id',
            'datasource_config'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_datasource_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['datasource_config'] is not None:
            # the request body is the validated model instance, passed as-is
            _body_params = _params['datasource_config']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # empty map: no response body is deserialized for this endpoint
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/datasource', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def update_datasource_processed_until_timestamp_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], datasource_processed_until_timestamp_request : Annotated[DatasourceProcessedUntilTimestampRequest, Field(..., description="The updated timestamp to set")], **kwargs) -> None: # noqa: E501
"""update_datasource_processed_until_timestamp_by_dataset_id # noqa: E501
Update timestamp of last resource in datapool # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_datasource_processed_until_timestamp_by_dataset_id(dataset_id, datasource_processed_until_timestamp_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param datasource_processed_until_timestamp_request: The updated timestamp to set (required)
:type datasource_processed_until_timestamp_request: DatasourceProcessedUntilTimestampRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the update_datasource_processed_until_timestamp_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.update_datasource_processed_until_timestamp_by_dataset_id_with_http_info(dataset_id, datasource_processed_until_timestamp_request, **kwargs) # noqa: E501
    @validate_arguments
    def update_datasource_processed_until_timestamp_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], datasource_processed_until_timestamp_request : Annotated[DatasourceProcessedUntilTimestampRequest, Field(..., description="The updated timestamp to set")], **kwargs) -> ApiResponse: # noqa: E501
        """update_datasource_processed_until_timestamp_by_dataset_id # noqa: E501
        Update timestamp of last resource in datapool # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_datasource_processed_until_timestamp_by_dataset_id_with_http_info(dataset_id, datasource_processed_until_timestamp_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param datasource_processed_until_timestamp_request: The updated timestamp to set (required)
        :type datasource_processed_until_timestamp_request: DatasourceProcessedUntilTimestampRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # locals() is captured before any helper variable is defined so that
        # _params holds exactly the validated arguments plus the raw 'kwargs'.
        _params = locals()
        _all_params = [
            'dataset_id',
            'datasource_processed_until_timestamp_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_datasource_processed_until_timestamp_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['datasource_processed_until_timestamp_request'] is not None:
            # the request body is the validated model instance, passed as-is
            _body_params = _params['datasource_processed_until_timestamp_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # empty map: no response body is deserialized for this endpoint
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/datasource/processedUntilTimestamp', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def verify_datasource_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> DatasourceConfigVerifyData: # noqa: E501
"""verify_datasource_by_dataset_id # noqa: E501
Test and verify that the configured datasource can be accessed correctly # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.verify_datasource_by_dataset_id(dataset_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DatasourceConfigVerifyData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the verify_datasource_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.verify_datasource_by_dataset_id_with_http_info(dataset_id, **kwargs) # noqa: E501
    @validate_arguments
    def verify_datasource_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> ApiResponse: # noqa: E501
        """verify_datasource_by_dataset_id # noqa: E501
        Test and verify that the configured datasource can be accessed correctly # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.verify_datasource_by_dataset_id_with_http_info(dataset_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(DatasourceConfigVerifyData, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is captured before any helper variable is defined so that
        # _params holds exactly the validated arguments plus the raw 'kwargs'.
        _params = locals()
        _all_params = [
            'dataset_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method verify_datasource_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # maps HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '200': "DatasourceConfigVerifyData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/datasource/verify', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 128,887 | 53.729512 | 2,163 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/docker_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, StrictBool, StrictStr, conint, conlist, constr, validator
from typing import List, Optional
from lightly.openapi_generated.swagger_client.models.create_docker_worker_registry_entry_request import CreateDockerWorkerRegistryEntryRequest
from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse
from lightly.openapi_generated.swagger_client.models.docker_authorization_request import DockerAuthorizationRequest
from lightly.openapi_generated.swagger_client.models.docker_authorization_response import DockerAuthorizationResponse
from lightly.openapi_generated.swagger_client.models.docker_license_information import DockerLicenseInformation
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_create_request import DockerRunArtifactCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_created_data import DockerRunArtifactCreatedData
from lightly.openapi_generated.swagger_client.models.docker_run_create_request import DockerRunCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_data import DockerRunData
from lightly.openapi_generated.swagger_client.models.docker_run_log_data import DockerRunLogData
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_create_request import DockerRunScheduledCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_data import DockerRunScheduledData
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_state import DockerRunScheduledState
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_update_request import DockerRunScheduledUpdateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_update_request import DockerRunUpdateRequest
from lightly.openapi_generated.swagger_client.models.docker_user_stats import DockerUserStats
from lightly.openapi_generated.swagger_client.models.docker_worker_authorization_request import DockerWorkerAuthorizationRequest
from lightly.openapi_generated.swagger_client.models.docker_worker_config_create_request import DockerWorkerConfigCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_worker_config_data import DockerWorkerConfigData
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_create_request import DockerWorkerConfigV2CreateRequest
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_data import DockerWorkerConfigV2Data
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_create_request import DockerWorkerConfigV3CreateRequest
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_data import DockerWorkerConfigV3Data
from lightly.openapi_generated.swagger_client.models.docker_worker_registry_entry_data import DockerWorkerRegistryEntryData
from lightly.openapi_generated.swagger_client.models.tag_data import TagData
from lightly.openapi_generated.swagger_client.models.update_docker_worker_registry_entry_request import UpdateDockerWorkerRegistryEntryRequest
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class DockerApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    """Create the API facade, falling back to the process-wide default client."""
    # ApiClient.get_default() lazily creates/returns the shared client instance.
    self.api_client = ApiClient.get_default() if api_client is None else api_client
@validate_arguments
def cancel_scheduled_docker_run_state_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], scheduled_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker run")], **kwargs) -> None:  # noqa: E501
    """cancel_scheduled_docker_run_state_by_id  # noqa: E501

    Data-only convenience wrapper around
    cancel_scheduled_docker_run_state_by_id_with_http_info. Cancels a
    scheduled run; this will fail if the state of the scheduled run is no
    longer OPEN (e.g when it is LOCKED).  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.cancel_scheduled_docker_run_state_by_id(dataset_id, scheduled_id, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param scheduled_id: ObjectId of the docker worker run (required)
    :type scheduled_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object. If the method is called
             asynchronously, returns the request thread.
    :rtype: None
    """
    # `_preload_content` only makes sense on the *_with_http_info variant,
    # where the raw HTTP response object is accessible to the caller.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the cancel_scheduled_docker_run_state_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # Force the lower-level call to return only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    return self.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, **kwargs)  # noqa: E501
@validate_arguments
def cancel_scheduled_docker_run_state_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], scheduled_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker run")], **kwargs) -> ApiResponse:  # noqa: E501
    """cancel_scheduled_docker_run_state_by_id  # noqa: E501

    Cancel a scheduled run. This will fail if the state of the scheduled run is no longer OPEN (e.g when it is LOCKED)  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.cancel_scheduled_docker_run_state_by_id_with_http_info(dataset_id, scheduled_id, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param scheduled_id: ObjectId of the docker worker run (required)
    :type scheduled_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Snapshot of the declared parameters (plus self and kwargs). Must stay
    # the first statement: any local bound earlier would leak into _params.
    _params = locals()

    _all_params = [
        'dataset_id',
        'scheduled_id'
    ]
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, merge known ones into _params
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method cancel_scheduled_docker_run_state_by_id" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}
    if _params['dataset_id']:
        _path_params['datasetId'] = _params['dataset_id']

    if _params['scheduled_id']:
        _path_params['scheduledId'] = _params['scheduled_id']

    # process the query parameters
    _query_params = []
    # process the header parameters
    _header_params = dict(_params.get('_headers', {}))
    # process the form parameters
    _form_params = []
    _files = {}
    # process the body parameter (this endpoint sends no request body)
    _body_params = None
    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # empty map: success responses carry no body to deserialize
    _response_types_map = {}

    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/docker/worker/schedule/{scheduledId}', 'PUT',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def confirm_docker_run_artifact_creation(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], artifact_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the artifact of the docker run")], **kwargs) -> None:  # noqa: E501
    """confirm_docker_run_artifact_creation  # noqa: E501

    Data-only convenience wrapper around
    confirm_docker_run_artifact_creation_with_http_info. Confirms that the
    docker run artifact has been uploaded and is available.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.confirm_docker_run_artifact_creation(run_id, artifact_id, async_req=True)
    >>> result = thread.get()

    :param run_id: ObjectId of the docker run (required)
    :type run_id: str
    :param artifact_id: ObjectId of the artifact of the docker run (required)
    :type artifact_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object. If the method is called
             asynchronously, returns the request thread.
    :rtype: None
    """
    # `_preload_content` is only honoured by the *_with_http_info variant.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the confirm_docker_run_artifact_creation_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # Force the lower-level call to return only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    return self.confirm_docker_run_artifact_creation_with_http_info(run_id, artifact_id, **kwargs)  # noqa: E501
@validate_arguments
def confirm_docker_run_artifact_creation_with_http_info(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], artifact_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the artifact of the docker run")], **kwargs) -> ApiResponse:  # noqa: E501
    """confirm_docker_run_artifact_creation  # noqa: E501

    confirm that the docker run artifact has been uploaded and is available  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.confirm_docker_run_artifact_creation_with_http_info(run_id, artifact_id, async_req=True)
    >>> result = thread.get()

    :param run_id: ObjectId of the docker run (required)
    :type run_id: str
    :param artifact_id: ObjectId of the artifact of the docker run (required)
    :type artifact_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # Snapshot of the declared parameters (plus self and kwargs). Must stay
    # the first statement: any local bound earlier would leak into _params.
    _params = locals()

    _all_params = [
        'run_id',
        'artifact_id'
    ]
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, merge known ones into _params
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method confirm_docker_run_artifact_creation" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}
    if _params['run_id']:
        _path_params['runId'] = _params['run_id']

    if _params['artifact_id']:
        _path_params['artifactId'] = _params['artifact_id']

    # process the query parameters
    _query_params = []
    # process the header parameters
    _header_params = dict(_params.get('_headers', {}))
    # process the form parameters
    _form_params = []
    _files = {}
    # process the body parameter (this endpoint sends no request body)
    _body_params = None
    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # empty map: success responses carry no body to deserialize
    _response_types_map = {}

    return self.api_client.call_api(
        '/v1/docker/runs/{runId}/artifacts/{artifactId}/confirmUpload', 'PUT',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def create_docker_run(self, docker_run_create_request : DockerRunCreateRequest, **kwargs) -> CreateEntityResponse:  # noqa: E501
    """create_docker_run  # noqa: E501

    Data-only convenience wrapper around create_docker_run_with_http_info.
    Creates a new docker run database entry.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_docker_run(docker_run_create_request, async_req=True)
    >>> result = thread.get()

    :param docker_run_create_request: (required)
    :type docker_run_create_request: DockerRunCreateRequest
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object. If the method is called
             asynchronously, returns the request thread.
    :rtype: CreateEntityResponse
    """
    # `_preload_content` is only honoured by the *_with_http_info variant.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the create_docker_run_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # Force the lower-level call to return only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    return self.create_docker_run_with_http_info(docker_run_create_request, **kwargs)  # noqa: E501
@validate_arguments
def create_docker_run_with_http_info(self, docker_run_create_request : DockerRunCreateRequest, **kwargs) -> ApiResponse:  # noqa: E501
    """create_docker_run  # noqa: E501

    Creates a new docker run database entry.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_docker_run_with_http_info(docker_run_create_request, async_req=True)
    >>> result = thread.get()

    :param docker_run_create_request: (required)
    :type docker_run_create_request: DockerRunCreateRequest
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # Snapshot of the declared parameters (plus self and kwargs). Must stay
    # the first statement: any local bound earlier would leak into _params.
    _params = locals()

    _all_params = [
        'docker_run_create_request'
    ]
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, merge known ones into _params
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_docker_run" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters (none for this endpoint)
    _path_params = {}

    # process the query parameters
    _query_params = []
    # process the header parameters
    _header_params = dict(_params.get('_headers', {}))
    # process the form parameters
    _form_params = []
    _files = {}
    # process the body parameter
    _body_params = None
    if _params['docker_run_create_request'] is not None:
        _body_params = _params['docker_run_create_request']

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # set the HTTP header `Content-Type` (caller may force one via _content_type)
    _content_types_list = _params.get('_content_type',
        self.api_client.select_header_content_type(
            ['application/json']))
    if _content_types_list:
        _header_params['Content-Type'] = _content_types_list

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # HTTP status code -> model class used for response deserialization
    _response_types_map = {
        '201': "CreateEntityResponse",
        '400': "ApiErrorResponse",
        '401': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/runs', 'POST',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def create_docker_run_artifact(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], docker_run_artifact_create_request : DockerRunArtifactCreateRequest, **kwargs) -> DockerRunArtifactCreatedData:  # noqa: E501
    """create_docker_run_artifact  # noqa: E501

    Data-only convenience wrapper around
    create_docker_run_artifact_with_http_info. Creates a docker run artifact
    and returns the writeUrl and artifactId to upload and confirm.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_docker_run_artifact(run_id, docker_run_artifact_create_request, async_req=True)
    >>> result = thread.get()

    :param run_id: ObjectId of the docker run (required)
    :type run_id: str
    :param docker_run_artifact_create_request: (required)
    :type docker_run_artifact_create_request: DockerRunArtifactCreateRequest
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object. If the method is called
             asynchronously, returns the request thread.
    :rtype: DockerRunArtifactCreatedData
    """
    # `_preload_content` is only honoured by the *_with_http_info variant.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the create_docker_run_artifact_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # Force the lower-level call to return only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    return self.create_docker_run_artifact_with_http_info(run_id, docker_run_artifact_create_request, **kwargs)  # noqa: E501
@validate_arguments
def create_docker_run_artifact_with_http_info(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], docker_run_artifact_create_request : DockerRunArtifactCreateRequest, **kwargs) -> ApiResponse:  # noqa: E501
    """create_docker_run_artifact  # noqa: E501

    creates a docker run artifact and returns the writeUrl and artifactId to upload and confirm  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_docker_run_artifact_with_http_info(run_id, docker_run_artifact_create_request, async_req=True)
    >>> result = thread.get()

    :param run_id: ObjectId of the docker run (required)
    :type run_id: str
    :param docker_run_artifact_create_request: (required)
    :type docker_run_artifact_create_request: DockerRunArtifactCreateRequest
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DockerRunArtifactCreatedData, status_code(int), headers(HTTPHeaderDict))
    """
    # Snapshot of the declared parameters (plus self and kwargs). Must stay
    # the first statement: any local bound earlier would leak into _params.
    _params = locals()

    _all_params = [
        'run_id',
        'docker_run_artifact_create_request'
    ]
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, merge known ones into _params
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_docker_run_artifact" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}
    if _params['run_id']:
        _path_params['runId'] = _params['run_id']

    # process the query parameters
    _query_params = []
    # process the header parameters
    _header_params = dict(_params.get('_headers', {}))
    # process the form parameters
    _form_params = []
    _files = {}
    # process the body parameter
    _body_params = None
    if _params['docker_run_artifact_create_request'] is not None:
        _body_params = _params['docker_run_artifact_create_request']

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # set the HTTP header `Content-Type` (caller may force one via _content_type)
    _content_types_list = _params.get('_content_type',
        self.api_client.select_header_content_type(
            ['application/json']))
    if _content_types_list:
        _header_params['Content-Type'] = _content_types_list

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # HTTP status code -> model class used for response deserialization
    _response_types_map = {
        '201': "DockerRunArtifactCreatedData",
        '400': "ApiErrorResponse",
        '401': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/runs/{runId}/artifacts', 'POST',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def create_docker_run_scheduled_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], docker_run_scheduled_create_request : DockerRunScheduledCreateRequest, disable_config_validation : Annotated[Optional[StrictBool], Field(description="if set, disables the sanity check and validation where we check if the provided configuration can run on your datasource e.g if predictions are used, we check that the bucket structure + tasks.json, schema.json are correct if metadata is used, we check that the bucket structure + schema.json are correct if relevantFilenamesFile is set, we check that the file exists ")] = None, **kwargs) -> CreateEntityResponse:  # noqa: E501
    """create_docker_run_scheduled_by_dataset_id  # noqa: E501

    Data-only convenience wrapper around
    create_docker_run_scheduled_by_dataset_id_with_http_info. Schedules a
    docker run by dataset id. With docker runs it's possible to process
    unlabeled images from a datasource and use active learning to select the
    most relevant samples for further processing and visualization in the
    web app.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_docker_run_scheduled_by_dataset_id(dataset_id, docker_run_scheduled_create_request, disable_config_validation, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param docker_run_scheduled_create_request: (required)
    :type docker_run_scheduled_create_request: DockerRunScheduledCreateRequest
    :param disable_config_validation: if set, disables the sanity check and validation where we check if the provided configuration can run on your datasource e.g if predictions are used, we check that the bucket structure + tasks.json, schema.json are correct if metadata is used, we check that the bucket structure + schema.json are correct if relevantFilenamesFile is set, we check that the file exists
    :type disable_config_validation: bool
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object. If the method is called
             asynchronously, returns the request thread.
    :rtype: CreateEntityResponse
    """
    # `_preload_content` is only honoured by the *_with_http_info variant.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the create_docker_run_scheduled_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # Force the lower-level call to return only the deserialized payload.
    kwargs['_return_http_data_only'] = True
    return self.create_docker_run_scheduled_by_dataset_id_with_http_info(dataset_id, docker_run_scheduled_create_request, disable_config_validation, **kwargs)  # noqa: E501
@validate_arguments
def create_docker_run_scheduled_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], docker_run_scheduled_create_request : DockerRunScheduledCreateRequest, disable_config_validation : Annotated[Optional[StrictBool], Field(description="if set, disables the sanity check and validation where we check if the provided configuration can run on your datasource e.g if predictions are used, we check that the bucket structure + tasks.json, schema.json are correct if metadata is used, we check that the bucket structure + schema.json are correct if relevantFilenamesFile is set, we check that the file exists ")] = None, **kwargs) -> ApiResponse:  # noqa: E501
    """create_docker_run_scheduled_by_dataset_id  # noqa: E501

    Schedule a docker run by dataset id. With docker runs it's possible to process unlabeled images from a datasource and use active learning to select the most relevant samples for further processing and visualization in the web app  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_docker_run_scheduled_by_dataset_id_with_http_info(dataset_id, docker_run_scheduled_create_request, disable_config_validation, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param docker_run_scheduled_create_request: (required)
    :type docker_run_scheduled_create_request: DockerRunScheduledCreateRequest
    :param disable_config_validation: if set, disables the sanity check and validation where we check if the provided configuration can run on your datasource e.g if predictions are used, we check that the bucket structure + tasks.json, schema.json are correct if metadata is used, we check that the bucket structure + schema.json are correct if relevantFilenamesFile is set, we check that the file exists
    :type disable_config_validation: bool
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # Snapshot of the declared parameters (plus self and kwargs). Must stay
    # the first statement: any local bound earlier would leak into _params.
    _params = locals()

    _all_params = [
        'dataset_id',
        'docker_run_scheduled_create_request',
        'disable_config_validation'
    ]
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, merge known ones into _params
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_docker_run_scheduled_by_dataset_id" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}
    if _params['dataset_id']:
        _path_params['datasetId'] = _params['dataset_id']

    # process the query parameters
    _query_params = []
    if _params.get('disable_config_validation') is not None:  # noqa: E501
        # enums are unwrapped to their raw value via the `.value` attribute
        _query_params.append((
            'disableConfigValidation',
            _params['disable_config_validation'].value if hasattr(_params['disable_config_validation'], 'value') else _params['disable_config_validation']
        ))

    # process the header parameters
    _header_params = dict(_params.get('_headers', {}))
    # process the form parameters
    _form_params = []
    _files = {}
    # process the body parameter
    _body_params = None
    if _params['docker_run_scheduled_create_request'] is not None:
        _body_params = _params['docker_run_scheduled_create_request']

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # set the HTTP header `Content-Type` (caller may force one via _content_type)
    _content_types_list = _params.get('_content_type',
        self.api_client.select_header_content_type(
            ['application/json']))
    if _content_types_list:
        _header_params['Content-Type'] = _content_types_list

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # HTTP status code -> model class used for response deserialization
    _response_types_map = {
        '201': "CreateEntityResponse",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/docker/worker/schedule', 'POST',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def create_docker_worker_config(self, docker_worker_config_create_request : DockerWorkerConfigCreateRequest, **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_docker_worker_config # noqa: E501
Creates a docker worker configuration. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_docker_worker_config(docker_worker_config_create_request, async_req=True)
>>> result = thread.get()
:param docker_worker_config_create_request: (required)
:type docker_worker_config_create_request: DockerWorkerConfigCreateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_docker_worker_config_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_docker_worker_config_with_http_info(docker_worker_config_create_request, **kwargs) # noqa: E501
    @validate_arguments
    def create_docker_worker_config_with_http_info(self, docker_worker_config_create_request : DockerWorkerConfigCreateRequest, **kwargs) -> ApiResponse: # noqa: E501
        """create_docker_worker_config # noqa: E501
        Creates a docker worker configuration. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_docker_worker_config_with_http_info(docker_worker_config_create_request, async_req=True)
        >>> result = thread.get()
        :param docker_worker_config_create_request: (required)
        :type docker_worker_config_create_request: DockerWorkerConfigCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is taken first, so the snapshot contains exactly: self,
        # the declared parameters, and the raw **kwargs dict.
        _params = locals()
        # Names this endpoint accepts: required/positional first, then the
        # generic per-request options shared by all generated endpoints.
        _all_params = [
            'docker_worker_config_create_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_docker_worker_config" % _key
                )
            _params[_key] = _val
        # Validated kwargs were merged above; drop the nested dict.
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters (none for this endpoint)
        _path_params = {}
        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter: the request model is sent as the JSON body
        _body_params = None
        if _params['docker_worker_config_create_request'] is not None:
            _body_params = _params['docker_worker_config_create_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type` (caller may force one via _content_type)
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # status code -> model name used to deserialize the response body
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
        return self.api_client.call_api(
            '/v1/docker/worker/config', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def create_docker_worker_config_v2(self, docker_worker_config_v2_create_request : DockerWorkerConfigV2CreateRequest, **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_docker_worker_config_v2 # noqa: E501
Creates a docker worker v2 configuration. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_docker_worker_config_v2(docker_worker_config_v2_create_request, async_req=True)
>>> result = thread.get()
:param docker_worker_config_v2_create_request: (required)
:type docker_worker_config_v2_create_request: DockerWorkerConfigV2CreateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_docker_worker_config_v2_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_docker_worker_config_v2_with_http_info(docker_worker_config_v2_create_request, **kwargs) # noqa: E501
    @validate_arguments
    def create_docker_worker_config_v2_with_http_info(self, docker_worker_config_v2_create_request : DockerWorkerConfigV2CreateRequest, **kwargs) -> ApiResponse: # noqa: E501
        """create_docker_worker_config_v2 # noqa: E501
        Creates a docker worker v2 configuration. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_docker_worker_config_v2_with_http_info(docker_worker_config_v2_create_request, async_req=True)
        >>> result = thread.get()
        :param docker_worker_config_v2_create_request: (required)
        :type docker_worker_config_v2_create_request: DockerWorkerConfigV2CreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is taken first, so the snapshot contains exactly: self,
        # the declared parameters, and the raw **kwargs dict.
        _params = locals()
        # Names this endpoint accepts: required/positional first, then the
        # generic per-request options shared by all generated endpoints.
        _all_params = [
            'docker_worker_config_v2_create_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_docker_worker_config_v2" % _key
                )
            _params[_key] = _val
        # Validated kwargs were merged above; drop the nested dict.
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters (none for this endpoint)
        _path_params = {}
        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter: the request model is sent as the JSON body
        _body_params = None
        if _params['docker_worker_config_v2_create_request'] is not None:
            _body_params = _params['docker_worker_config_v2_create_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type` (caller may force one via _content_type)
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # status code -> model name used to deserialize the response body
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
        return self.api_client.call_api(
            '/v1/docker/worker/config/v2', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def create_docker_worker_config_v3(self, docker_worker_config_v3_create_request : DockerWorkerConfigV3CreateRequest, **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_docker_worker_config_v3 # noqa: E501
Creates a docker worker v3 configuration. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_docker_worker_config_v3(docker_worker_config_v3_create_request, async_req=True)
>>> result = thread.get()
:param docker_worker_config_v3_create_request: (required)
:type docker_worker_config_v3_create_request: DockerWorkerConfigV3CreateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_docker_worker_config_v3_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_docker_worker_config_v3_with_http_info(docker_worker_config_v3_create_request, **kwargs) # noqa: E501
    @validate_arguments
    def create_docker_worker_config_v3_with_http_info(self, docker_worker_config_v3_create_request : DockerWorkerConfigV3CreateRequest, **kwargs) -> ApiResponse: # noqa: E501
        """create_docker_worker_config_v3 # noqa: E501
        Creates a docker worker v3 configuration. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_docker_worker_config_v3_with_http_info(docker_worker_config_v3_create_request, async_req=True)
        >>> result = thread.get()
        :param docker_worker_config_v3_create_request: (required)
        :type docker_worker_config_v3_create_request: DockerWorkerConfigV3CreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is taken first, so the snapshot contains exactly: self,
        # the declared parameters, and the raw **kwargs dict.
        _params = locals()
        # Names this endpoint accepts: required/positional first, then the
        # generic per-request options shared by all generated endpoints.
        _all_params = [
            'docker_worker_config_v3_create_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_docker_worker_config_v3" % _key
                )
            _params[_key] = _val
        # Validated kwargs were merged above; drop the nested dict.
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters (none for this endpoint)
        _path_params = {}
        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter: the request model is sent as the JSON body
        _body_params = None
        if _params['docker_worker_config_v3_create_request'] is not None:
            _body_params = _params['docker_worker_config_v3_create_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type` (caller may force one via _content_type)
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # status code -> model name used to deserialize the response body
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
        return self.api_client.call_api(
            '/v1/docker/worker/config/v3', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def delete_docker_worker_registry_entry_by_id(self, worker_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker")], **kwargs) -> None: # noqa: E501
"""delete_docker_worker_registry_entry_by_id # noqa: E501
Deletes a worker registry entry by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_docker_worker_registry_entry_by_id(worker_id, async_req=True)
>>> result = thread.get()
:param worker_id: ObjectId of the docker worker (required)
:type worker_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the delete_docker_worker_registry_entry_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.delete_docker_worker_registry_entry_by_id_with_http_info(worker_id, **kwargs) # noqa: E501
    @validate_arguments
    def delete_docker_worker_registry_entry_by_id_with_http_info(self, worker_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker")], **kwargs) -> ApiResponse: # noqa: E501
        """delete_docker_worker_registry_entry_by_id # noqa: E501
        Deletes a worker registry entry by id. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_docker_worker_registry_entry_by_id_with_http_info(worker_id, async_req=True)
        >>> result = thread.get()
        :param worker_id: ObjectId of the docker worker (required)
        :type worker_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # locals() is taken first, so the snapshot contains exactly: self,
        # the declared parameters, and the raw **kwargs dict.
        _params = locals()
        # Names this endpoint accepts: required/positional first, then the
        # generic per-request options shared by all generated endpoints.
        _all_params = [
            'worker_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_docker_worker_registry_entry_by_id" % _key
                )
            _params[_key] = _val
        # Validated kwargs were merged above; drop the nested dict.
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): truthiness check — an empty-string id would silently
        # omit the path parameter rather than fail fast.
        if _params['worker_id']:
            _path_params['workerId'] = _params['worker_id']
        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (DELETE sends no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # empty map: no typed response models declared for this endpoint
        _response_types_map = {}
        # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
        return self.api_client.call_api(
            '/v1/docker/worker/{workerId}', 'DELETE',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_license_information(self, **kwargs) -> DockerLicenseInformation: # noqa: E501
"""get_docker_license_information # noqa: E501
Requests license information to run the container. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_license_information(async_req=True)
>>> result = thread.get()
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DockerLicenseInformation
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_license_information_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_license_information_with_http_info(**kwargs) # noqa: E501
    @validate_arguments
    def get_docker_license_information_with_http_info(self, **kwargs) -> ApiResponse: # noqa: E501
        """get_docker_license_information # noqa: E501
        Requests license information to run the container. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_docker_license_information_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(DockerLicenseInformation, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is taken first, so the snapshot contains exactly: self
        # and the raw **kwargs dict (this endpoint has no declared parameters).
        _params = locals()
        # Only the generic per-request options are accepted here.
        _all_params = [
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_docker_license_information" % _key
                )
            _params[_key] = _val
        # Validated kwargs were merged above; drop the nested dict.
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters (none for this endpoint)
        _path_params = {}
        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET sends no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # status code -> model name used to deserialize the response body
        _response_types_map = {
            '200': "DockerLicenseInformation",
            '400': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
        return self.api_client.call_api(
            '/v1/docker/licenseInformation', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_run_artifact_read_url_by_id(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], artifact_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the artifact of the docker run")], **kwargs) -> str: # noqa: E501
"""get_docker_run_artifact_read_url_by_id # noqa: E501
Get the url of a specific docker runs artifact # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_artifact_read_url_by_id(run_id, artifact_id, async_req=True)
>>> result = thread.get()
:param run_id: ObjectId of the docker run (required)
:type run_id: str
:param artifact_id: ObjectId of the artifact of the docker run (required)
:type artifact_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_run_artifact_read_url_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_run_artifact_read_url_by_id_with_http_info(run_id, artifact_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_docker_run_artifact_read_url_by_id_with_http_info(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], artifact_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the artifact of the docker run")], **kwargs) -> ApiResponse: # noqa: E501
        """get_docker_run_artifact_read_url_by_id # noqa: E501
        Get the url of a specific docker runs artifact # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_docker_run_artifact_read_url_by_id_with_http_info(run_id, artifact_id, async_req=True)
        >>> result = thread.get()
        :param run_id: ObjectId of the docker run (required)
        :type run_id: str
        :param artifact_id: ObjectId of the artifact of the docker run (required)
        :type artifact_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is taken first, so the snapshot contains exactly: self,
        # the declared parameters, and the raw **kwargs dict.
        _params = locals()
        # Names this endpoint accepts: required/positional first, then the
        # generic per-request options shared by all generated endpoints.
        _all_params = [
            'run_id',
            'artifact_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, merge known ones into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_docker_run_artifact_read_url_by_id" % _key
                )
            _params[_key] = _val
        # Validated kwargs were merged above; drop the nested dict.
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): truthiness checks — an empty-string id would silently
        # omit the path parameter rather than fail fast.
        if _params['run_id']:
            _path_params['runId'] = _params['run_id']
        if _params['artifact_id']:
            _path_params['artifactId'] = _params['artifact_id']
        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET sends no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # status code -> model name used to deserialize the response body
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # Delegate the HTTP round trip (and optional async dispatch) to ApiClient.
        return self.api_client.call_api(
            '/v1/docker/runs/{runId}/artifacts/{artifactId}/readurl', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_run_by_id(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> DockerRunData: # noqa: E501
"""get_docker_run_by_id # noqa: E501
Gets a docker run by docker run id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_by_id(run_id, async_req=True)
>>> result = thread.get()
:param run_id: ObjectId of the docker run (required)
:type run_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DockerRunData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_run_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_run_by_id_with_http_info(run_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_docker_run_by_id_with_http_info(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_docker_run_by_id  # noqa: E501

        Gets a docker run by docker run id.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_docker_run_by_id_with_http_info(run_id, async_req=True)
        >>> result = thread.get()

        :param run_id: ObjectId of the docker run (required)
        :type run_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(DockerRunData, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() snapshot: _params keys mirror this method's argument names,
        # so the string lookups below must match the signature exactly.
        _params = locals()
        _all_params = [
            'run_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, flatten known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_docker_run_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): a falsy run_id (e.g. "") silently omits the path
        # param — generated-code behavior, confirm intended.
        if _params['run_id']:
            _path_params['runId'] = _params['run_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # status code -> model name used by the client for deserialization
        _response_types_map = {
            '200': "DockerRunData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/docker/runs/{runId}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_run_by_scheduled_id(self, scheduled_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker run")], **kwargs) -> DockerRunData: # noqa: E501
"""get_docker_run_by_scheduled_id # noqa: E501
Retrieves the associated docker run of a scheduled run; returns the docker run by the id of the scheduled run which caused this docker run. If a scheduled docker run has not yet started being processed by a worker, a 404 will be returned. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_by_scheduled_id(scheduled_id, async_req=True)
>>> result = thread.get()
:param scheduled_id: ObjectId of the docker worker run (required)
:type scheduled_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DockerRunData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_run_by_scheduled_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_run_by_scheduled_id_with_http_info(scheduled_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_docker_run_by_scheduled_id_with_http_info(self, scheduled_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker run")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_docker_run_by_scheduled_id  # noqa: E501

        Retrieves the associated docker run of a scheduled run; returns the docker run by the id of the scheduled run which caused this docker run. If a scheduled docker run has not yet started being processed by a worker, a 404 will be returned.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_docker_run_by_scheduled_id_with_http_info(scheduled_id, async_req=True)
        >>> result = thread.get()

        :param scheduled_id: ObjectId of the docker worker run (required)
        :type scheduled_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(DockerRunData, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() snapshot: _params keys mirror this method's argument names,
        # so the string lookups below must match the signature exactly.
        _params = locals()
        _all_params = [
            'scheduled_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, flatten known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_docker_run_by_scheduled_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): a falsy scheduled_id (e.g. "") silently omits the
        # path param — generated-code behavior, confirm intended.
        if _params['scheduled_id']:
            _path_params['scheduledId'] = _params['scheduled_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # status code -> model name used by the client for deserialization
        _response_types_map = {
            '200': "DockerRunData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/docker/runs/schedule/{scheduledId}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_run_logs_by_id(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], cursor : Annotated[Optional[conint(strict=True, ge=0)], Field(description="the cursor of where the logs last were")] = None, **kwargs) -> DockerRunLogData: # noqa: E501
"""get_docker_run_logs_by_id # noqa: E501
Gets the logs of a docker run by docker run id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_logs_by_id(run_id, cursor, async_req=True)
>>> result = thread.get()
:param run_id: ObjectId of the docker run (required)
:type run_id: str
:param cursor: the cursor of where the logs last were
:type cursor: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: DockerRunLogData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_run_logs_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_run_logs_by_id_with_http_info(run_id, cursor, **kwargs) # noqa: E501
    @validate_arguments
    def get_docker_run_logs_by_id_with_http_info(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], cursor : Annotated[Optional[conint(strict=True, ge=0)], Field(description="the cursor of where the logs last were")] = None, **kwargs) -> ApiResponse:  # noqa: E501
        """get_docker_run_logs_by_id  # noqa: E501

        Gets the logs of a docker run by docker run id.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_docker_run_logs_by_id_with_http_info(run_id, cursor, async_req=True)
        >>> result = thread.get()

        :param run_id: ObjectId of the docker run (required)
        :type run_id: str
        :param cursor: the cursor of where the logs last were
        :type cursor: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(DockerRunLogData, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() snapshot: _params keys mirror this method's argument names,
        # so the string lookups below must match the signature exactly.
        _params = locals()
        _all_params = [
            'run_id',
            'cursor'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, flatten known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_docker_run_logs_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): a falsy run_id (e.g. "") silently omits the path
        # param — generated-code behavior, confirm intended.
        if _params['run_id']:
            _path_params['runId'] = _params['run_id']
        # process the query parameters
        _query_params = []
        # cursor is optional; `.value` unwraps enum-like wrappers if present
        if _params.get('cursor') is not None:  # noqa: E501
            _query_params.append((
                'cursor',
                _params['cursor'].value if hasattr(_params['cursor'], 'value') else _params['cursor']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # status code -> model name used by the client for deserialization
        _response_types_map = {
            '200': "DockerRunLogData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/docker/runs/{runId}/logs', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_run_report_read_url_by_id(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> str: # noqa: E501
"""(Deprecated) get_docker_run_report_read_url_by_id # noqa: E501
Get the url of a specific docker runs report # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_report_read_url_by_id(run_id, async_req=True)
>>> result = thread.get()
:param run_id: ObjectId of the docker run (required)
:type run_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_run_report_read_url_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_run_report_read_url_by_id_with_http_info(run_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_docker_run_report_read_url_by_id_with_http_info(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> ApiResponse:  # noqa: E501
        """(Deprecated) get_docker_run_report_read_url_by_id  # noqa: E501

        Get the url of a specific docker runs report  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_docker_run_report_read_url_by_id_with_http_info(run_id, async_req=True)
        >>> result = thread.get()

        :param run_id: ObjectId of the docker run (required)
        :type run_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # emit a DeprecationWarning every call; endpoint is scheduled for removal
        warnings.warn("GET /v1/docker/runs/{runId}/readReportUrl is deprecated.", DeprecationWarning)
        # locals() snapshot: _params keys mirror this method's argument names,
        # so the string lookups below must match the signature exactly.
        _params = locals()
        _all_params = [
            'run_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, flatten known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_docker_run_report_read_url_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): a falsy run_id (e.g. "") silently omits the path
        # param — generated-code behavior, confirm intended.
        if _params['run_id']:
            _path_params['runId'] = _params['run_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # status code -> model name used by the client for deserialization
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/docker/runs/{runId}/readReportUrl', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_run_report_write_url_by_id(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> str: # noqa: E501
"""(Deprecated) get_docker_run_report_write_url_by_id # noqa: E501
Get the signed url to upload a report of a docker run # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_report_write_url_by_id(run_id, async_req=True)
>>> result = thread.get()
:param run_id: ObjectId of the docker run (required)
:type run_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_run_report_write_url_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_run_report_write_url_by_id_with_http_info(run_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_docker_run_report_write_url_by_id_with_http_info(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> ApiResponse:  # noqa: E501
        """(Deprecated) get_docker_run_report_write_url_by_id  # noqa: E501

        Get the signed url to upload a report of a docker run  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_docker_run_report_write_url_by_id_with_http_info(run_id, async_req=True)
        >>> result = thread.get()

        :param run_id: ObjectId of the docker run (required)
        :type run_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # emit a DeprecationWarning every call; endpoint is scheduled for removal
        warnings.warn("GET /v1/docker/runs/{runId}/writeReportUrl is deprecated.", DeprecationWarning)
        # locals() snapshot: _params keys mirror this method's argument names,
        # so the string lookups below must match the signature exactly.
        _params = locals()
        _all_params = [
            'run_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, flatten known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_docker_run_report_write_url_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): a falsy run_id (e.g. "") silently omits the path
        # param — generated-code behavior, confirm intended.
        if _params['run_id']:
            _path_params['runId'] = _params['run_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # status code -> model name used by the client for deserialization
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/docker/runs/{runId}/writeReportUrl', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_run_tags(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> List[TagData]: # noqa: E501
"""get_docker_run_tags # noqa: E501
Gets all tags which were created from a docker run by docker run id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_run_tags(run_id, async_req=True)
>>> result = thread.get()
:param run_id: ObjectId of the docker run (required)
:type run_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[TagData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_run_tags_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_run_tags_with_http_info(run_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_docker_run_tags_with_http_info(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_docker_run_tags  # noqa: E501

        Gets all tags which were created from a docker run by docker run id.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_docker_run_tags_with_http_info(run_id, async_req=True)
        >>> result = thread.get()

        :param run_id: ObjectId of the docker run (required)
        :type run_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[TagData], status_code(int), headers(HTTPHeaderDict))
        """
        # locals() snapshot: _params keys mirror this method's argument names,
        # so the string lookups below must match the signature exactly.
        _params = locals()
        _all_params = [
            'run_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject unknown kwargs, flatten known ones
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_docker_run_tags" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): a falsy run_id (e.g. "") silently omits the path
        # param — generated-code behavior, confirm intended.
        if _params['run_id']:
            _path_params['runId'] = _params['run_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # status code -> model name used by the client for deserialization
        _response_types_map = {
            '200': "List[TagData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/docker/runs/{runId}/tags', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_runs(self, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> List[DockerRunData]: # noqa: E501
"""get_docker_runs # noqa: E501
Gets all docker runs for a user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs(page_size, page_offset, get_assets_of_team, get_assets_of_team_inclusive_self, async_req=True)
>>> result = thread.get()
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
:type get_assets_of_team: bool
:param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
:type get_assets_of_team_inclusive_self: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[DockerRunData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_runs_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_runs_with_http_info(page_size, page_offset, get_assets_of_team, get_assets_of_team_inclusive_self, **kwargs) # noqa: E501
@validate_arguments
def get_docker_runs_with_http_info(self, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> ApiResponse: # noqa: E501
"""get_docker_runs # noqa: E501
Gets all docker runs for a user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_with_http_info(page_size, page_offset, get_assets_of_team, get_assets_of_team_inclusive_self, async_req=True)
>>> result = thread.get()
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
:type get_assets_of_team: bool
:param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
:type get_assets_of_team_inclusive_self: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the ApiResponse.data will
be set to none and raw_data will store the
HTTP response body without reading/decoding.
Default is True.
:type _preload_content: bool, optional
:param _return_http_data_only: response data instead of ApiResponse
object with status code, headers, etc
:type _return_http_data_only: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(List[DockerRunData], status_code(int), headers(HTTPHeaderDict))
"""
_params = locals()
_all_params = [
'page_size',
'page_offset',
'get_assets_of_team',
'get_assets_of_team_inclusive_self'
]
_all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
# validate the arguments
for _key, _val in _params['kwargs'].items():
if _key not in _all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_runs" % _key
)
_params[_key] = _val
del _params['kwargs']
_collection_formats = {}
# process the path parameters
_path_params = {}
# process the query parameters
_query_params = []
if _params.get('page_size') is not None: # noqa: E501
_query_params.append((
'pageSize',
_params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
))
if _params.get('page_offset') is not None: # noqa: E501
_query_params.append((
'pageOffset',
_params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
))
if _params.get('get_assets_of_team') is not None: # noqa: E501
_query_params.append((
'getAssetsOfTeam',
_params['get_assets_of_team'].value if hasattr(_params['get_assets_of_team'], 'value') else _params['get_assets_of_team']
))
if _params.get('get_assets_of_team_inclusive_self') is not None: # noqa: E501
_query_params.append((
'getAssetsOfTeamInclusiveSelf',
_params['get_assets_of_team_inclusive_self'].value if hasattr(_params['get_assets_of_team_inclusive_self'], 'value') else _params['get_assets_of_team_inclusive_self']
))
# process the header parameters
_header_params = dict(_params.get('_headers', {}))
# process the form parameters
_form_params = []
_files = {}
# process the body parameter
_body_params = None
# set the HTTP header `Accept`
_header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# authentication setting
_auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
_response_types_map = {
'200': "List[DockerRunData]",
'400': "ApiErrorResponse",
'401': "ApiErrorResponse",
'403': "ApiErrorResponse",
'404': "ApiErrorResponse",
}
return self.api_client.call_api(
'/v1/docker/runs', 'GET',
_path_params,
_query_params,
_header_params,
body=_body_params,
post_params=_form_params,
files=_files,
response_types_map=_response_types_map,
auth_settings=_auth_settings,
async_req=_params.get('async_req'),
_return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
_preload_content=_params.get('_preload_content', True),
_request_timeout=_params.get('_request_timeout'),
collection_formats=_collection_formats,
_request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_runs_count(self, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> str: # noqa: E501
"""get_docker_runs_count # noqa: E501
Gets the total count of the amount of runs existing for a user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_count(get_assets_of_team, get_assets_of_team_inclusive_self, async_req=True)
>>> result = thread.get()
:param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
:type get_assets_of_team: bool
:param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
:type get_assets_of_team_inclusive_self: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_runs_count_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_runs_count_with_http_info(get_assets_of_team, get_assets_of_team_inclusive_self, **kwargs) # noqa: E501
@validate_arguments
def get_docker_runs_count_with_http_info(self, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> ApiResponse: # noqa: E501
"""get_docker_runs_count # noqa: E501
Gets the total count of the amount of runs existing for a user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_count_with_http_info(get_assets_of_team, get_assets_of_team_inclusive_self, async_req=True)
>>> result = thread.get()
:param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
:type get_assets_of_team: bool
:param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
:type get_assets_of_team_inclusive_self: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the ApiResponse.data will
be set to none and raw_data will store the
HTTP response body without reading/decoding.
Default is True.
:type _preload_content: bool, optional
:param _return_http_data_only: response data instead of ApiResponse
object with status code, headers, etc
:type _return_http_data_only: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
"""
_params = locals()
_all_params = [
'get_assets_of_team',
'get_assets_of_team_inclusive_self'
]
_all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
# validate the arguments
for _key, _val in _params['kwargs'].items():
if _key not in _all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_runs_count" % _key
)
_params[_key] = _val
del _params['kwargs']
_collection_formats = {}
# process the path parameters
_path_params = {}
# process the query parameters
_query_params = []
if _params.get('get_assets_of_team') is not None: # noqa: E501
_query_params.append((
'getAssetsOfTeam',
_params['get_assets_of_team'].value if hasattr(_params['get_assets_of_team'], 'value') else _params['get_assets_of_team']
))
if _params.get('get_assets_of_team_inclusive_self') is not None: # noqa: E501
_query_params.append((
'getAssetsOfTeamInclusiveSelf',
_params['get_assets_of_team_inclusive_self'].value if hasattr(_params['get_assets_of_team_inclusive_self'], 'value') else _params['get_assets_of_team_inclusive_self']
))
# process the header parameters
_header_params = dict(_params.get('_headers', {}))
# process the form parameters
_form_params = []
_files = {}
# process the body parameter
_body_params = None
# set the HTTP header `Accept`
_header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json']) # noqa: E501
# authentication setting
_auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
_response_types_map = {
'200': "str",
'400': "ApiErrorResponse",
'401': "ApiErrorResponse",
'403': "ApiErrorResponse",
'404': "ApiErrorResponse",
}
return self.api_client.call_api(
'/v1/docker/runs/count', 'GET',
_path_params,
_query_params,
_header_params,
body=_body_params,
post_params=_form_params,
files=_files,
response_types_map=_response_types_map,
auth_settings=_auth_settings,
async_req=_params.get('async_req'),
_return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
_preload_content=_params.get('_preload_content', True),
_request_timeout=_params.get('_request_timeout'),
collection_formats=_collection_formats,
_request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_runs_query_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> List[DockerRunData]: # noqa: E501
"""get_docker_runs_query_by_dataset_id # noqa: E501
Get all docker runs of a user by dataset id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_query_by_dataset_id(dataset_id, page_size, page_offset, get_assets_of_team, get_assets_of_team_inclusive_self, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
:type get_assets_of_team: bool
:param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
:type get_assets_of_team_inclusive_self: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[DockerRunData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_runs_query_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_runs_query_by_dataset_id_with_http_info(dataset_id, page_size, page_offset, get_assets_of_team, get_assets_of_team_inclusive_self, **kwargs) # noqa: E501
@validate_arguments
def get_docker_runs_query_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> ApiResponse: # noqa: E501
"""get_docker_runs_query_by_dataset_id # noqa: E501
Get all docker runs of a user by dataset id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_query_by_dataset_id_with_http_info(dataset_id, page_size, page_offset, get_assets_of_team, get_assets_of_team_inclusive_self, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
:type get_assets_of_team: bool
:param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
:type get_assets_of_team_inclusive_self: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the ApiResponse.data will
be set to none and raw_data will store the
HTTP response body without reading/decoding.
Default is True.
:type _preload_content: bool, optional
:param _return_http_data_only: response data instead of ApiResponse
object with status code, headers, etc
:type _return_http_data_only: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(List[DockerRunData], status_code(int), headers(HTTPHeaderDict))
"""
_params = locals()
_all_params = [
'dataset_id',
'page_size',
'page_offset',
'get_assets_of_team',
'get_assets_of_team_inclusive_self'
]
_all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
# validate the arguments
for _key, _val in _params['kwargs'].items():
if _key not in _all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_runs_query_by_dataset_id" % _key
)
_params[_key] = _val
del _params['kwargs']
_collection_formats = {}
# process the path parameters
_path_params = {}
if _params['dataset_id']:
_path_params['datasetId'] = _params['dataset_id']
# process the query parameters
_query_params = []
if _params.get('page_size') is not None: # noqa: E501
_query_params.append((
'pageSize',
_params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
))
if _params.get('page_offset') is not None: # noqa: E501
_query_params.append((
'pageOffset',
_params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
))
if _params.get('get_assets_of_team') is not None: # noqa: E501
_query_params.append((
'getAssetsOfTeam',
_params['get_assets_of_team'].value if hasattr(_params['get_assets_of_team'], 'value') else _params['get_assets_of_team']
))
if _params.get('get_assets_of_team_inclusive_self') is not None: # noqa: E501
_query_params.append((
'getAssetsOfTeamInclusiveSelf',
_params['get_assets_of_team_inclusive_self'].value if hasattr(_params['get_assets_of_team_inclusive_self'], 'value') else _params['get_assets_of_team_inclusive_self']
))
# process the header parameters
_header_params = dict(_params.get('_headers', {}))
# process the form parameters
_form_params = []
_files = {}
# process the body parameter
_body_params = None
# set the HTTP header `Accept`
_header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# authentication setting
_auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
_response_types_map = {
'200': "List[DockerRunData]",
'400': "ApiErrorResponse",
'403': "ApiErrorResponse",
'404': "ApiErrorResponse",
}
return self.api_client.call_api(
'/v1/docker/runs/query/datasetId/{datasetId}', 'GET',
_path_params,
_query_params,
_header_params,
body=_body_params,
post_params=_form_params,
files=_files,
response_types_map=_response_types_map,
auth_settings=_auth_settings,
async_req=_params.get('async_req'),
_return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
_preload_content=_params.get('_preload_content', True),
_request_timeout=_params.get('_request_timeout'),
collection_formats=_collection_formats,
_request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_runs_scheduled_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], state : Optional[DockerRunScheduledState] = None, **kwargs) -> List[DockerRunScheduledData]: # noqa: E501
"""get_docker_runs_scheduled_by_dataset_id # noqa: E501
Get all scheduled docker runs by dataset id. If no state is specified, returns runs which have not yet finished (neither DONE or CANCELED). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_scheduled_by_dataset_id(dataset_id, state, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param state:
:type state: DockerRunScheduledState
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[DockerRunScheduledData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_runs_scheduled_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_runs_scheduled_by_dataset_id_with_http_info(dataset_id, state, **kwargs) # noqa: E501
@validate_arguments
def get_docker_runs_scheduled_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], state : Optional[DockerRunScheduledState] = None, **kwargs) -> ApiResponse: # noqa: E501
"""get_docker_runs_scheduled_by_dataset_id # noqa: E501
Get all scheduled docker runs by dataset id. If no state is specified, returns runs which have not yet finished (neither DONE or CANCELED). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_scheduled_by_dataset_id_with_http_info(dataset_id, state, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param state:
:type state: DockerRunScheduledState
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the ApiResponse.data will
be set to none and raw_data will store the
HTTP response body without reading/decoding.
Default is True.
:type _preload_content: bool, optional
:param _return_http_data_only: response data instead of ApiResponse
object with status code, headers, etc
:type _return_http_data_only: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:type _content_type: string, optional: force content-type for the request
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(List[DockerRunScheduledData], status_code(int), headers(HTTPHeaderDict))
"""
_params = locals()
_all_params = [
'dataset_id',
'state'
]
_all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'_content_type',
'_headers'
]
)
# validate the arguments
for _key, _val in _params['kwargs'].items():
if _key not in _all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_docker_runs_scheduled_by_dataset_id" % _key
)
_params[_key] = _val
del _params['kwargs']
_collection_formats = {}
# process the path parameters
_path_params = {}
if _params['dataset_id']:
_path_params['datasetId'] = _params['dataset_id']
# process the query parameters
_query_params = []
if _params.get('state') is not None: # noqa: E501
_query_params.append((
'state',
_params['state'].value if hasattr(_params['state'], 'value') else _params['state']
))
# process the header parameters
_header_params = dict(_params.get('_headers', {}))
# process the form parameters
_form_params = []
_files = {}
# process the body parameter
_body_params = None
# set the HTTP header `Accept`
_header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# authentication setting
_auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
_response_types_map = {
'200': "List[DockerRunScheduledData]",
'400': "ApiErrorResponse",
'403': "ApiErrorResponse",
'404': "ApiErrorResponse",
}
return self.api_client.call_api(
'/v1/datasets/{datasetId}/docker/worker/schedule', 'GET',
_path_params,
_query_params,
_header_params,
body=_body_params,
post_params=_form_params,
files=_files,
response_types_map=_response_types_map,
auth_settings=_auth_settings,
async_req=_params.get('async_req'),
_return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
_preload_content=_params.get('_preload_content', True),
_request_timeout=_params.get('_request_timeout'),
collection_formats=_collection_formats,
_request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_runs_scheduled_by_state_and_labels(self, state : Optional[DockerRunScheduledState] = None, labels : Optional[conlist(StrictStr)] = None, version : Optional[StrictStr] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> List[DockerRunScheduledData]: # noqa: E501
"""get_docker_runs_scheduled_by_state_and_labels # noqa: E501
Get all scheduled docker runs of the user. Additionally, you can filter by state. Furthermore, you can filter by only providing labels and only return scheduled runs whose runsOn labels are included in the provided labels. Runs are filtered by the provided version parameter. Version parameter set to * returns all configs # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_docker_runs_scheduled_by_state_and_labels(state, labels, version, get_assets_of_team, get_assets_of_team_inclusive_self, async_req=True)
>>> result = thread.get()
:param state:
:type state: DockerRunScheduledState
:param labels:
:type labels: List[str]
:param version:
:type version: str
:param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
:type get_assets_of_team: bool
:param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
:type get_assets_of_team_inclusive_self: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[DockerRunScheduledData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_docker_runs_scheduled_by_state_and_labels_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_docker_runs_scheduled_by_state_and_labels_with_http_info(state, labels, version, get_assets_of_team, get_assets_of_team_inclusive_self, **kwargs) # noqa: E501
@validate_arguments
def get_docker_runs_scheduled_by_state_and_labels_with_http_info(self, state : Optional[DockerRunScheduledState] = None, labels : Optional[conlist(StrictStr)] = None, version : Optional[StrictStr] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> ApiResponse:  # noqa: E501
    """get_docker_runs_scheduled_by_state_and_labels  # noqa: E501

    Get all scheduled docker runs of the user. Additionally, you can filter by state. Furthermore, you can filter by only providing labels and only return scheduled runs whose runsOn labels are included in the provided labels. Runs are filtered by the provided version parameter. Version parameter set to * returns all configs  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_docker_runs_scheduled_by_state_and_labels_with_http_info(state, labels, version, get_assets_of_team, get_assets_of_team_inclusive_self, async_req=True)
    >>> result = thread.get()

    :param state:
    :type state: DockerRunScheduledState
    :param labels:
    :type labels: List[str]
    :param version:
    :type version: str
    :param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
    :type get_assets_of_team: bool
    :param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
    :type get_assets_of_team_inclusive_self: bool
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(List[DockerRunScheduledData], status_code(int), headers(HTTPHeaderDict))
    """

    # Captured first, so it contains exactly the declared parameters
    # (plus 'self' and the raw 'kwargs' dict) and nothing else.
    _params = locals()

    # Declared request parameters for this endpoint.
    _all_params = [
        'state',
        'labels',
        'version',
        'get_assets_of_team',
        'get_assets_of_team_inclusive_self'
    ]
    # Generic per-request options understood by the API client.
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, then flatten the
    # accepted ones into _params so they can be read uniformly below.
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_docker_runs_scheduled_by_state_and_labels" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters (none for this endpoint)
    _path_params = {}

    # process the query parameters; enum-like values are serialized via
    # their .value, plain values pass through unchanged.
    _query_params = []
    if _params.get('state') is not None:  # noqa: E501
        _query_params.append((
            'state',
            _params['state'].value if hasattr(_params['state'], 'value') else _params['state']
        ))
    if _params.get('labels') is not None:  # noqa: E501
        _query_params.append((
            'labels',
            _params['labels'].value if hasattr(_params['labels'], 'value') else _params['labels']
        ))
        # 'multi' collection format: the query key is repeated once per
        # list element when the request is built.
        _collection_formats['labels'] = 'multi'
    if _params.get('version') is not None:  # noqa: E501
        _query_params.append((
            'version',
            _params['version'].value if hasattr(_params['version'], 'value') else _params['version']
        ))
    if _params.get('get_assets_of_team') is not None:  # noqa: E501
        _query_params.append((
            'getAssetsOfTeam',
            _params['get_assets_of_team'].value if hasattr(_params['get_assets_of_team'], 'value') else _params['get_assets_of_team']
        ))
    if _params.get('get_assets_of_team_inclusive_self') is not None:  # noqa: E501
        _query_params.append((
            'getAssetsOfTeamInclusiveSelf',
            _params['get_assets_of_team_inclusive_self'].value if hasattr(_params['get_assets_of_team_inclusive_self'], 'value') else _params['get_assets_of_team_inclusive_self']
        ))

    # process the header parameters: caller-supplied headers first
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters (none for this GET endpoint)
    _form_params = []
    _files = {}

    # process the body parameter (no body for this GET endpoint)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # status code -> model used to deserialize the response body
    _response_types_map = {
        '200': "List[DockerRunScheduledData]",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/worker/schedule', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_runs_scheduled_by_worker_id(self, worker_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker")], state : Optional[DockerRunScheduledState] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> List[DockerRunScheduledData]:  # noqa: E501
    """get_docker_runs_scheduled_by_worker_id  # noqa: E501

    Get all scheduled runs that might be picked up by the worker with that workerId.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    >>> thread = api.get_docker_runs_scheduled_by_worker_id(worker_id, state, get_assets_of_team, get_assets_of_team_inclusive_self, async_req=True)
    >>> result = thread.get()

    :param worker_id: ObjectId of the docker worker (required)
    :type worker_id: str
    :param state:
    :type state: DockerRunScheduledState
    :param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
    :type get_assets_of_team: bool
    :param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
    :type get_assets_of_team_inclusive_self: bool
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: List[DockerRunScheduledData]
    """
    # This convenience variant always returns just the deserialized body;
    # raw-response access goes through the *_with_http_info sibling.
    kwargs['_return_http_data_only'] = True
    if '_preload_content' in kwargs:
        message = "Error! Please call the get_docker_runs_scheduled_by_worker_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
        raise ValueError(message)
    return self.get_docker_runs_scheduled_by_worker_id_with_http_info(worker_id, state, get_assets_of_team, get_assets_of_team_inclusive_self, **kwargs)  # noqa: E501
@validate_arguments
def get_docker_runs_scheduled_by_worker_id_with_http_info(self, worker_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker")], state : Optional[DockerRunScheduledState] = None, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> ApiResponse:  # noqa: E501
    """get_docker_runs_scheduled_by_worker_id  # noqa: E501

    Get all scheduled runs that might be picked up by the worker with that workerId.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_docker_runs_scheduled_by_worker_id_with_http_info(worker_id, state, get_assets_of_team, get_assets_of_team_inclusive_self, async_req=True)
    >>> result = thread.get()

    :param worker_id: ObjectId of the docker worker (required)
    :type worker_id: str
    :param state:
    :type state: DockerRunScheduledState
    :param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
    :type get_assets_of_team: bool
    :param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
    :type get_assets_of_team_inclusive_self: bool
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(List[DockerRunScheduledData], status_code(int), headers(HTTPHeaderDict))
    """

    # Captured first, so it contains exactly the declared parameters
    # (plus 'self' and the raw 'kwargs' dict) and nothing else.
    _params = locals()

    # Declared request parameters for this endpoint.
    _all_params = [
        'worker_id',
        'state',
        'get_assets_of_team',
        'get_assets_of_team_inclusive_self'
    ]
    # Generic per-request options understood by the API client.
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, then flatten the
    # accepted ones into _params so they can be read uniformly below.
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_docker_runs_scheduled_by_worker_id" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters: substitutes {workerId} in the route
    _path_params = {}
    if _params['worker_id']:
        _path_params['workerId'] = _params['worker_id']

    # process the query parameters; enum-like values are serialized via
    # their .value, plain values pass through unchanged.
    _query_params = []
    if _params.get('state') is not None:  # noqa: E501
        _query_params.append((
            'state',
            _params['state'].value if hasattr(_params['state'], 'value') else _params['state']
        ))
    if _params.get('get_assets_of_team') is not None:  # noqa: E501
        _query_params.append((
            'getAssetsOfTeam',
            _params['get_assets_of_team'].value if hasattr(_params['get_assets_of_team'], 'value') else _params['get_assets_of_team']
        ))
    if _params.get('get_assets_of_team_inclusive_self') is not None:  # noqa: E501
        _query_params.append((
            'getAssetsOfTeamInclusiveSelf',
            _params['get_assets_of_team_inclusive_self'].value if hasattr(_params['get_assets_of_team_inclusive_self'], 'value') else _params['get_assets_of_team_inclusive_self']
        ))

    # process the header parameters: caller-supplied headers first
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters (none for this GET endpoint)
    _form_params = []
    _files = {}

    # process the body parameter (no body for this GET endpoint)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # status code -> model used to deserialize the response body
    _response_types_map = {
        '200': "List[DockerRunScheduledData]",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/worker/{workerId}/schedule', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_worker_config_by_id(self, config_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker config")], **kwargs) -> DockerWorkerConfigData:  # noqa: E501
    """get_docker_worker_config_by_id  # noqa: E501

    Gets a docker worker configuration by id. It will try to return the config version but expects (and will fail if not) the config to be of v0  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    >>> thread = api.get_docker_worker_config_by_id(config_id, async_req=True)
    >>> result = thread.get()

    :param config_id: ObjectId of the docker worker config (required)
    :type config_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: DockerWorkerConfigData
    """
    # This convenience variant always returns just the deserialized body;
    # raw-response access goes through the *_with_http_info sibling.
    kwargs['_return_http_data_only'] = True
    if '_preload_content' in kwargs:
        message = "Error! Please call the get_docker_worker_config_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
        raise ValueError(message)
    return self.get_docker_worker_config_by_id_with_http_info(config_id, **kwargs)  # noqa: E501
@validate_arguments
def get_docker_worker_config_by_id_with_http_info(self, config_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker config")], **kwargs) -> ApiResponse:  # noqa: E501
    """get_docker_worker_config_by_id  # noqa: E501

    Gets a docker worker configuration by id. It will try to return the config version but expects (and will fail if not) the config to be of v0  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_docker_worker_config_by_id_with_http_info(config_id, async_req=True)
    >>> result = thread.get()

    :param config_id: ObjectId of the docker worker config (required)
    :type config_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DockerWorkerConfigData, status_code(int), headers(HTTPHeaderDict))
    """

    # Captured first, so it contains exactly the declared parameters
    # (plus 'self' and the raw 'kwargs' dict) and nothing else.
    _params = locals()

    # Declared request parameters for this endpoint.
    _all_params = [
        'config_id'
    ]
    # Generic per-request options understood by the API client.
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, then flatten the
    # accepted ones into _params so they can be read uniformly below.
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_docker_worker_config_by_id" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters: substitutes {configId} in the route
    _path_params = {}
    if _params['config_id']:
        _path_params['configId'] = _params['config_id']

    # process the query parameters (none for this endpoint)
    _query_params = []

    # process the header parameters: caller-supplied headers first
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters (none for this GET endpoint)
    _form_params = []
    _files = {}

    # process the body parameter (no body for this GET endpoint)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # status code -> model used to deserialize the response body
    _response_types_map = {
        '200': "DockerWorkerConfigData",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/worker/config/{configId}', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_worker_config_v2_by_id(self, config_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker config")], **kwargs) -> DockerWorkerConfigV2Data:  # noqa: E501
    """get_docker_worker_config_v2_by_id  # noqa: E501

    Gets a docker worker configuration by id. It will try to return the config version but expects (and will fail if not) the config to be of v2  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    >>> thread = api.get_docker_worker_config_v2_by_id(config_id, async_req=True)
    >>> result = thread.get()

    :param config_id: ObjectId of the docker worker config (required)
    :type config_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: DockerWorkerConfigV2Data
    """
    # This convenience variant always returns just the deserialized body;
    # raw-response access goes through the *_with_http_info sibling.
    kwargs['_return_http_data_only'] = True
    if '_preload_content' in kwargs:
        message = "Error! Please call the get_docker_worker_config_v2_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
        raise ValueError(message)
    return self.get_docker_worker_config_v2_by_id_with_http_info(config_id, **kwargs)  # noqa: E501
@validate_arguments
def get_docker_worker_config_v2_by_id_with_http_info(self, config_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker config")], **kwargs) -> ApiResponse:  # noqa: E501
    """get_docker_worker_config_v2_by_id  # noqa: E501

    Gets a docker worker configuration by id. It will try to return the config version but expects (and will fail if not) the config to be of v2  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_docker_worker_config_v2_by_id_with_http_info(config_id, async_req=True)
    >>> result = thread.get()

    :param config_id: ObjectId of the docker worker config (required)
    :type config_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DockerWorkerConfigV2Data, status_code(int), headers(HTTPHeaderDict))
    """

    # Captured first, so it contains exactly the declared parameters
    # (plus 'self' and the raw 'kwargs' dict) and nothing else.
    _params = locals()

    # Declared request parameters for this endpoint.
    _all_params = [
        'config_id'
    ]
    # Generic per-request options understood by the API client.
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, then flatten the
    # accepted ones into _params so they can be read uniformly below.
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_docker_worker_config_v2_by_id" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters: substitutes {configId} in the route
    _path_params = {}
    if _params['config_id']:
        _path_params['configId'] = _params['config_id']

    # process the query parameters (none for this endpoint)
    _query_params = []

    # process the header parameters: caller-supplied headers first
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters (none for this GET endpoint)
    _form_params = []
    _files = {}

    # process the body parameter (no body for this GET endpoint)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # status code -> model used to deserialize the response body
    _response_types_map = {
        '200': "DockerWorkerConfigV2Data",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/worker/config/v2/{configId}', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_worker_config_v3_by_id(self, config_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker config")], **kwargs) -> DockerWorkerConfigV3Data:  # noqa: E501
    """get_docker_worker_config_v3_by_id  # noqa: E501

    Gets a docker worker configuration by id. It will try to return the config version but requires the config to be of v3.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    >>> thread = api.get_docker_worker_config_v3_by_id(config_id, async_req=True)
    >>> result = thread.get()

    :param config_id: ObjectId of the docker worker config (required)
    :type config_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: DockerWorkerConfigV3Data
    """
    # This convenience variant always returns just the deserialized body;
    # raw-response access goes through the *_with_http_info sibling.
    kwargs['_return_http_data_only'] = True
    if '_preload_content' in kwargs:
        message = "Error! Please call the get_docker_worker_config_v3_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
        raise ValueError(message)
    return self.get_docker_worker_config_v3_by_id_with_http_info(config_id, **kwargs)  # noqa: E501
@validate_arguments
def get_docker_worker_config_v3_by_id_with_http_info(self, config_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker config")], **kwargs) -> ApiResponse:  # noqa: E501
    """get_docker_worker_config_v3_by_id  # noqa: E501

    Gets a docker worker configuration by id. It will try to return the config version but requires the config to be of v3.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    >>> thread = api.get_docker_worker_config_v3_by_id_with_http_info(config_id, async_req=True)
    >>> result = thread.get()

    :param config_id: ObjectId of the docker worker config (required)
    :type config_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DockerWorkerConfigV3Data, status_code(int), headers(HTTPHeaderDict))
    """

    # Snapshot of the declared parameters; taken before any other names
    # are bound so it holds only 'self', 'config_id' and 'kwargs'.
    _params = locals()

    # The declared endpoint parameter plus the generic per-request
    # options understood by the API client.
    _all_params = [
        'config_id',
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
        '_content_type',
        '_headers',
    ]

    # Reject unknown keyword arguments, then merge accepted ones into
    # the parameter snapshot for uniform access below.
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_docker_worker_config_v3_by_id" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    # Path parameters: substitute {configId} in the route template.
    _path_params = {'configId': _params['config_id']} if _params['config_id'] else {}

    # Headers: start from caller-supplied ones, then pin Accept.
    _header_params = dict(_params.get('_headers', {}))
    _header_params['Accept'] = self.api_client.select_header_accept(['application/json'])

    # Status code -> model used to deserialize the response body.
    _response_types_map = {
        '200': "DockerWorkerConfigV3Data",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/worker/config/v3/{configId}', 'GET',
        _path_params,
        [],  # no query parameters
        _header_params,
        body=None,
        post_params=[],
        files={},
        response_types_map=_response_types_map,
        auth_settings=['auth0Bearer', 'ApiKeyAuth'],
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats={},
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_worker_configs(self, **kwargs) -> List[DockerWorkerConfigData]:  # noqa: E501
    """get_docker_worker_configs  # noqa: E501

    Get docker worker configurations.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    >>> thread = api.get_docker_worker_configs(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: List[DockerWorkerConfigData]
    """
    # This convenience variant always returns just the deserialized body;
    # raw-response access goes through the *_with_http_info sibling.
    kwargs['_return_http_data_only'] = True
    if '_preload_content' in kwargs:
        message = "Error! Please call the get_docker_worker_configs_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
        raise ValueError(message)
    return self.get_docker_worker_configs_with_http_info(**kwargs)  # noqa: E501
@validate_arguments
def get_docker_worker_configs_with_http_info(self, **kwargs) -> ApiResponse:  # noqa: E501
    """get_docker_worker_configs  # noqa: E501

    Get docker worker configurations.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive a request
    thread whose ``get()`` yields the result instead.

    >>> thread = api.get_docker_worker_configs_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(List[DockerWorkerConfigData], status_code(int), headers(HTTPHeaderDict))
    """

    # Snapshot of the declared parameters; taken before any other names
    # are bound so it holds only 'self' and 'kwargs'.
    _params = locals()

    # This endpoint declares no request parameters; only the generic
    # per-request options understood by the API client are accepted.
    _all_params = [
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
        '_content_type',
        '_headers',
    ]

    # Reject unknown keyword arguments, then merge accepted ones into
    # the parameter snapshot for uniform access below.
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_docker_worker_configs" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    # Headers: start from caller-supplied ones, then pin Accept.
    _header_params = dict(_params.get('_headers', {}))
    _header_params['Accept'] = self.api_client.select_header_accept(['application/json'])

    # Status code -> model used to deserialize the response body.
    _response_types_map = {
        '200': "List[DockerWorkerConfigData]",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/worker/config', 'GET',
        {},  # no path parameters
        [],  # no query parameters
        _header_params,
        body=None,
        post_params=[],
        files={},
        response_types_map=_response_types_map,
        auth_settings=['auth0Bearer', 'ApiKeyAuth'],
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats={},
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_worker_registry_entries(self, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> List[DockerWorkerRegistryEntryData]: # noqa: E501
    """get_docker_worker_registry_entries # noqa: E501

    Fetch all worker registry entries visible to the current user. # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result via ``thread.get()``.

    :param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
    :type get_assets_of_team: bool
    :param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
    :type get_assets_of_team_inclusive_self: bool
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout for this request — a single number
                             (total timeout) or a (connection, read) tuple.
    :return: the deserialized response data, or the request thread when
             the call is asynchronous.
    :rtype: List[DockerWorkerRegistryEntryData]
    """
    # `_preload_content` is only meaningful on the raw-response variant.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the get_docker_worker_registry_entries_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # Delegate to the *_with_http_info twin, requesting data only.
    kwargs['_return_http_data_only'] = True
    return self.get_docker_worker_registry_entries_with_http_info(
        get_assets_of_team,
        get_assets_of_team_inclusive_self,
        **kwargs
    )  # noqa: E501
@validate_arguments
def get_docker_worker_registry_entries_with_http_info(self, get_assets_of_team : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user")] = None, get_assets_of_team_inclusive_self : Annotated[Optional[StrictBool], Field(description="if this flag is true, we get the relevant asset of the team of the user including the assets of the user")] = None, **kwargs) -> ApiResponse: # noqa: E501
    """get_docker_worker_registry_entries # noqa: E501

    Returns all worker registry entries for a given user. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_docker_worker_registry_entries_with_http_info(get_assets_of_team, get_assets_of_team_inclusive_self, async_req=True)
    >>> result = thread.get()

    :param get_assets_of_team: if this flag is true, we get the relevant asset of the team of the user rather than the assets of the user
    :type get_assets_of_team: bool
    :param get_assets_of_team_inclusive_self: if this flag is true, we get the relevant asset of the team of the user including the assets of the user
    :type get_assets_of_team_inclusive_self: bool
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(List[DockerWorkerRegistryEntryData], status_code(int), headers(HTTPHeaderDict))
    """
    # NOTE: auto-generated endpoint wrapper (OpenAPI Generator); keep its
    # structure in sync with the sibling *_with_http_info methods.
    # Snapshot of the call arguments; must remain the very first statement
    # so no helper locals leak into the request parameters.
    _params = locals()

    # Endpoint-specific parameters accepted by this method.
    _all_params = [
        'get_assets_of_team',
        'get_assets_of_team_inclusive_self'
    ]
    # Generic per-request options accepted by every endpoint.
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, merge known ones into _params
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_docker_worker_registry_entries" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}

    # process the query parameters
    _query_params = []
    if _params.get('get_assets_of_team') is not None: # noqa: E501
        _query_params.append((
            'getAssetsOfTeam',
            # unwrap Enum members to their raw value for the query string
            _params['get_assets_of_team'].value if hasattr(_params['get_assets_of_team'], 'value') else _params['get_assets_of_team']
        ))
    if _params.get('get_assets_of_team_inclusive_self') is not None: # noqa: E501
        _query_params.append((
            'getAssetsOfTeamInclusiveSelf',
            _params['get_assets_of_team_inclusive_self'].value if hasattr(_params['get_assets_of_team_inclusive_self'], 'value') else _params['get_assets_of_team_inclusive_self']
        ))

    # process the header parameters (copy so caller's dict is not mutated)
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters
    _form_params = []
    _files = {}

    # process the body parameter (GET request: no body)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json']) # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501

    # maps HTTP status codes to the model used for deserialization
    _response_types_map = {
        '200': "List[DockerWorkerRegistryEntryData]",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/worker', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_docker_worker_registry_entry_by_id(self, worker_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker")], **kwargs) -> DockerWorkerRegistryEntryData: # noqa: E501
    """get_docker_worker_registry_entry_by_id # noqa: E501

    Look up a single worker registry entry by its id. # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result via ``thread.get()``.

    :param worker_id: ObjectId of the docker worker (required)
    :type worker_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout for this request — a single number
                             (total timeout) or a (connection, read) tuple.
    :return: the deserialized response data, or the request thread when
             the call is asynchronous.
    :rtype: DockerWorkerRegistryEntryData
    """
    # `_preload_content` is only meaningful on the raw-response variant.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the get_docker_worker_registry_entry_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # Delegate to the *_with_http_info twin, requesting data only.
    kwargs['_return_http_data_only'] = True
    return self.get_docker_worker_registry_entry_by_id_with_http_info(
        worker_id,
        **kwargs
    )  # noqa: E501
@validate_arguments
def get_docker_worker_registry_entry_by_id_with_http_info(self, worker_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker")], **kwargs) -> ApiResponse: # noqa: E501
    """get_docker_worker_registry_entry_by_id # noqa: E501

    Returns worker registry entry by id. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_docker_worker_registry_entry_by_id_with_http_info(worker_id, async_req=True)
    >>> result = thread.get()

    :param worker_id: ObjectId of the docker worker (required)
    :type worker_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DockerWorkerRegistryEntryData, status_code(int), headers(HTTPHeaderDict))
    """
    # NOTE: auto-generated endpoint wrapper (OpenAPI Generator); keep its
    # structure in sync with the sibling *_with_http_info methods.
    # Snapshot of the call arguments; must remain the very first statement
    # so no helper locals leak into the request parameters.
    _params = locals()

    # Endpoint-specific parameters accepted by this method.
    _all_params = [
        'worker_id'
    ]
    # Generic per-request options accepted by every endpoint.
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, merge known ones into _params
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_docker_worker_registry_entry_by_id" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}
    # truthy check (generated code): an empty-string worker_id would leave
    # the {workerId} placeholder unsubstituted
    if _params['worker_id']:
        _path_params['workerId'] = _params['worker_id']

    # process the query parameters
    _query_params = []

    # process the header parameters (copy so caller's dict is not mutated)
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters
    _form_params = []
    _files = {}

    # process the body parameter (GET request: no body)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json']) # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501

    # maps HTTP status codes to the model used for deserialization
    _response_types_map = {
        '200': "DockerWorkerRegistryEntryData",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/worker/{workerId}', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def post_docker_authorization_request(self, docker_authorization_request : DockerAuthorizationRequest, **kwargs) -> DockerAuthorizationResponse: # noqa: E501
    """post_docker_authorization_request # noqa: E501

    Ask the backend for authorization to run the container. # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result via ``thread.get()``.

    :param docker_authorization_request: (required)
    :type docker_authorization_request: DockerAuthorizationRequest
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout for this request — a single number
                             (total timeout) or a (connection, read) tuple.
    :return: the deserialized response data, or the request thread when
             the call is asynchronous.
    :rtype: DockerAuthorizationResponse
    """
    # `_preload_content` is only meaningful on the raw-response variant.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the post_docker_authorization_request_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # Delegate to the *_with_http_info twin, requesting data only.
    kwargs['_return_http_data_only'] = True
    return self.post_docker_authorization_request_with_http_info(
        docker_authorization_request,
        **kwargs
    )  # noqa: E501
@validate_arguments
def post_docker_authorization_request_with_http_info(self, docker_authorization_request : DockerAuthorizationRequest, **kwargs) -> ApiResponse: # noqa: E501
    """post_docker_authorization_request # noqa: E501

    Performs an authorization to run the container. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.post_docker_authorization_request_with_http_info(docker_authorization_request, async_req=True)
    >>> result = thread.get()

    :param docker_authorization_request: (required)
    :type docker_authorization_request: DockerAuthorizationRequest
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(DockerAuthorizationResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # NOTE: auto-generated endpoint wrapper (OpenAPI Generator); keep its
    # structure in sync with the sibling *_with_http_info methods.
    # Snapshot of the call arguments; must remain the very first statement
    # so no helper locals leak into the request parameters.
    _params = locals()

    # Endpoint-specific parameters accepted by this method.
    _all_params = [
        'docker_authorization_request'
    ]
    # Generic per-request options accepted by every endpoint.
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, merge known ones into _params
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_docker_authorization_request" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}

    # process the query parameters
    _query_params = []

    # process the header parameters (copy so caller's dict is not mutated)
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters
    _form_params = []
    _files = {}

    # process the body parameter: the request model is serialized as the body
    _body_params = None
    if _params['docker_authorization_request'] is not None:
        _body_params = _params['docker_authorization_request']

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json']) # noqa: E501

    # set the HTTP header `Content-Type` (caller may force it via _content_type)
    _content_types_list = _params.get('_content_type',
        self.api_client.select_header_content_type(
            ['application/json']))
    if _content_types_list:
        _header_params['Content-Type'] = _content_types_list

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501

    # maps HTTP status codes to the model used for deserialization
    _response_types_map = {
        '200': "DockerAuthorizationResponse",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/authorization', 'POST',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def post_docker_usage_stats(self, docker_user_stats : DockerUserStats, **kwargs) -> None: # noqa: E501
    """post_docker_usage_stats # noqa: E501

    Record a diagnostic entry of user stats. # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result via ``thread.get()``.

    :param docker_user_stats: (required)
    :type docker_user_stats: DockerUserStats
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout for this request — a single number
                             (total timeout) or a (connection, read) tuple.
    :return: None, or the request thread when the call is asynchronous.
    :rtype: None
    """
    # `_preload_content` is only meaningful on the raw-response variant.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the post_docker_usage_stats_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # Delegate to the *_with_http_info twin, requesting data only.
    kwargs['_return_http_data_only'] = True
    return self.post_docker_usage_stats_with_http_info(
        docker_user_stats,
        **kwargs
    )  # noqa: E501
@validate_arguments
def post_docker_usage_stats_with_http_info(self, docker_user_stats : DockerUserStats, **kwargs) -> ApiResponse: # noqa: E501
    """post_docker_usage_stats # noqa: E501

    Adds a diagnostic entry of user stats. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.post_docker_usage_stats_with_http_info(docker_user_stats, async_req=True)
    >>> result = thread.get()

    :param docker_user_stats: (required)
    :type docker_user_stats: DockerUserStats
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # NOTE: auto-generated endpoint wrapper (OpenAPI Generator); keep its
    # structure in sync with the sibling *_with_http_info methods.
    # Snapshot of the call arguments; must remain the very first statement
    # so no helper locals leak into the request parameters.
    _params = locals()

    # Endpoint-specific parameters accepted by this method.
    _all_params = [
        'docker_user_stats'
    ]
    # Generic per-request options accepted by every endpoint.
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, merge known ones into _params
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_docker_usage_stats" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}

    # process the query parameters
    _query_params = []

    # process the header parameters (copy so caller's dict is not mutated)
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters
    _form_params = []
    _files = {}

    # process the body parameter: the stats model is serialized as the body
    _body_params = None
    if _params['docker_user_stats'] is not None:
        _body_params = _params['docker_user_stats']

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json']) # noqa: E501

    # set the HTTP header `Content-Type` (caller may force it via _content_type)
    _content_types_list = _params.get('_content_type',
        self.api_client.select_header_content_type(
            ['application/json']))
    if _content_types_list:
        _header_params['Content-Type'] = _content_types_list

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501

    # empty map: no response body is deserialized for this endpoint
    _response_types_map = {}

    return self.api_client.call_api(
        '/v1/docker', 'POST',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def post_docker_worker_authorization_request(self, docker_worker_authorization_request : DockerWorkerAuthorizationRequest, **kwargs) -> str: # noqa: E501
    """post_docker_worker_authorization_request # noqa: E501

    Ask the backend for authorization to run the Lightly Worker. # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result via ``thread.get()``.

    :param docker_worker_authorization_request: (required)
    :type docker_worker_authorization_request: DockerWorkerAuthorizationRequest
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout for this request — a single number
                             (total timeout) or a (connection, read) tuple.
    :return: the deserialized response data, or the request thread when
             the call is asynchronous.
    :rtype: str
    """
    # `_preload_content` is only meaningful on the raw-response variant.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the post_docker_worker_authorization_request_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # Delegate to the *_with_http_info twin, requesting data only.
    kwargs['_return_http_data_only'] = True
    return self.post_docker_worker_authorization_request_with_http_info(
        docker_worker_authorization_request,
        **kwargs
    )  # noqa: E501
@validate_arguments
def post_docker_worker_authorization_request_with_http_info(self, docker_worker_authorization_request : DockerWorkerAuthorizationRequest, **kwargs) -> ApiResponse: # noqa: E501
    """post_docker_worker_authorization_request # noqa: E501

    Performs an authorization to run the Lightly Worker. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.post_docker_worker_authorization_request_with_http_info(docker_worker_authorization_request, async_req=True)
    >>> result = thread.get()

    :param docker_worker_authorization_request: (required)
    :type docker_worker_authorization_request: DockerWorkerAuthorizationRequest
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
    """
    # NOTE: auto-generated endpoint wrapper (OpenAPI Generator); keep its
    # structure in sync with the sibling *_with_http_info methods.
    # Snapshot of the call arguments; must remain the very first statement
    # so no helper locals leak into the request parameters.
    _params = locals()

    # Endpoint-specific parameters accepted by this method.
    _all_params = [
        'docker_worker_authorization_request'
    ]
    # Generic per-request options accepted by every endpoint.
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, merge known ones into _params
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method post_docker_worker_authorization_request" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}

    # process the query parameters
    _query_params = []

    # process the header parameters (copy so caller's dict is not mutated)
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters
    _form_params = []
    _files = {}

    # process the body parameter: the request model is serialized as the body
    _body_params = None
    if _params['docker_worker_authorization_request'] is not None:
        _body_params = _params['docker_worker_authorization_request']

    # set the HTTP header `Accept` (this endpoint may answer with plain text)
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['text/plain', 'application/json']) # noqa: E501

    # set the HTTP header `Content-Type` (caller may force it via _content_type)
    _content_types_list = _params.get('_content_type',
        self.api_client.select_header_content_type(
            ['application/json']))
    if _content_types_list:
        _header_params['Content-Type'] = _content_types_list

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501

    # maps HTTP status codes to the model used for deserialization
    _response_types_map = {
        '200': "str",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/workerAuthorization', 'POST',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def register_docker_worker(self, create_docker_worker_registry_entry_request : CreateDockerWorkerRegistryEntryRequest, for_user_id : Annotated[Optional[StrictStr], Field(description="The userId for which we want to create the worker for. This is only allowed for users within the same team.")] = None, **kwargs) -> CreateEntityResponse: # noqa: E501
    """register_docker_worker # noqa: E501

    Register a worker for a user; re-registering an existing worker name
    yields the same workerId. # noqa: E501
    Synchronous by default; pass ``async_req=True`` to receive a request
    thread instead and obtain the result via ``thread.get()``.

    :param create_docker_worker_registry_entry_request: (required)
    :type create_docker_worker_registry_entry_request: CreateDockerWorkerRegistryEntryRequest
    :param for_user_id: The userId for which we want to create the worker for. This is only allowed for users within the same team.
    :type for_user_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout for this request — a single number
                             (total timeout) or a (connection, read) tuple.
    :return: the deserialized response data, or the request thread when
             the call is asynchronous.
    :rtype: CreateEntityResponse
    """
    # `_preload_content` is only meaningful on the raw-response variant.
    if '_preload_content' in kwargs:
        raise ValueError("Error! Please call the register_docker_worker_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
    # Delegate to the *_with_http_info twin, requesting data only.
    kwargs['_return_http_data_only'] = True
    return self.register_docker_worker_with_http_info(
        create_docker_worker_registry_entry_request,
        for_user_id,
        **kwargs
    )  # noqa: E501
@validate_arguments
def register_docker_worker_with_http_info(self, create_docker_worker_registry_entry_request : CreateDockerWorkerRegistryEntryRequest, for_user_id : Annotated[Optional[StrictStr], Field(description="The userId for which we want to create the worker for. This is only allowed for users within the same team.")] = None, **kwargs) -> ApiResponse: # noqa: E501
    """register_docker_worker # noqa: E501

    Registers a worker for a user. If a worker with the same name is passed that already exists, the same workerId will be returned # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.register_docker_worker_with_http_info(create_docker_worker_registry_entry_request, for_user_id, async_req=True)
    >>> result = thread.get()

    :param create_docker_worker_registry_entry_request: (required)
    :type create_docker_worker_registry_entry_request: CreateDockerWorkerRegistryEntryRequest
    :param for_user_id: The userId for which we want to create the worker for. This is only allowed for users within the same team.
    :type for_user_id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
    """
    # NOTE: auto-generated endpoint wrapper (OpenAPI Generator); keep its
    # structure in sync with the sibling *_with_http_info methods.
    # Snapshot of the call arguments; must remain the very first statement
    # so no helper locals leak into the request parameters.
    _params = locals()

    # Endpoint-specific parameters accepted by this method.
    _all_params = [
        'create_docker_worker_registry_entry_request',
        'for_user_id'
    ]
    # Generic per-request options accepted by every endpoint.
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject unknown kwargs, merge known ones into _params
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method register_docker_worker" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters
    _path_params = {}

    # process the query parameters
    _query_params = []
    if _params.get('for_user_id') is not None: # noqa: E501
        _query_params.append((
            'forUserId',
            # unwrap Enum members to their raw value for the query string
            _params['for_user_id'].value if hasattr(_params['for_user_id'], 'value') else _params['for_user_id']
        ))

    # process the header parameters (copy so caller's dict is not mutated)
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters
    _form_params = []
    _files = {}

    # process the body parameter: the request model is serialized as the body
    _body_params = None
    if _params['create_docker_worker_registry_entry_request'] is not None:
        _body_params = _params['create_docker_worker_registry_entry_request']

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json']) # noqa: E501

    # set the HTTP header `Content-Type` (caller may force it via _content_type)
    _content_types_list = _params.get('_content_type',
        self.api_client.select_header_content_type(
            ['application/json']))
    if _content_types_list:
        _header_params['Content-Type'] = _content_types_list

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501

    # maps HTTP status codes to the model used for deserialization
    _response_types_map = {
        '201': "CreateEntityResponse",
        '400': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/docker/worker', 'POST',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def update_docker_run_by_id(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], docker_run_update_request : DockerRunUpdateRequest, **kwargs) -> None: # noqa: E501
"""update_docker_run_by_id # noqa: E501
Updates a docker run database entry. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_docker_run_by_id(run_id, docker_run_update_request, async_req=True)
>>> result = thread.get()
:param run_id: ObjectId of the docker run (required)
:type run_id: str
:param docker_run_update_request: (required)
:type docker_run_update_request: DockerRunUpdateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the update_docker_run_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.update_docker_run_by_id_with_http_info(run_id, docker_run_update_request, **kwargs) # noqa: E501
    @validate_arguments
    def update_docker_run_by_id_with_http_info(self, run_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker run")], docker_run_update_request : DockerRunUpdateRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """update_docker_run_by_id # noqa: E501
        Updates a docker run database entry. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_docker_run_by_id_with_http_info(run_id, docker_run_update_request, async_req=True)
        >>> result = thread.get()
        :param run_id: ObjectId of the docker run (required)
        :type run_id: str
        :param docker_run_update_request: (required)
        :type docker_run_update_request: DockerRunUpdateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # NOTE: locals() is taken as the very first statement so it captures
        # exactly the declared parameters plus the ``kwargs`` dict and nothing
        # else; the rest of the method reads all inputs through ``_params``.
        _params = locals()
        # endpoint-specific parameters accepted by this method
        _all_params = [
            'run_id',
            'docker_run_update_request'
        ]
        # generic per-request options understood by every generated endpoint
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_docker_run_by_id" % _key
                )
            _params[_key] = _val
        # drop the kwargs container itself; its entries were merged above
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['run_id']:
            _path_params['runId'] = _params['run_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['docker_run_update_request'] is not None:
            _body_params = _params['docker_run_update_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # empty map: this endpoint returns no response body to deserialize
        _response_types_map = {}
        # delegate to the shared ApiClient; returns synchronously, or a
        # request thread when async_req is set
        return self.api_client.call_api(
            '/v1/docker/runs/{runId}', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def update_docker_worker_config_by_id(self, config_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker config")], docker_worker_config_create_request : DockerWorkerConfigCreateRequest, **kwargs) -> None: # noqa: E501
"""(Deprecated) update_docker_worker_config_by_id # noqa: E501
DEPRECATED, DONT USE. Updates a docker worker configuration by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_docker_worker_config_by_id(config_id, docker_worker_config_create_request, async_req=True)
>>> result = thread.get()
:param config_id: ObjectId of the docker worker config (required)
:type config_id: str
:param docker_worker_config_create_request: (required)
:type docker_worker_config_create_request: DockerWorkerConfigCreateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the update_docker_worker_config_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.update_docker_worker_config_by_id_with_http_info(config_id, docker_worker_config_create_request, **kwargs) # noqa: E501
    @validate_arguments
    def update_docker_worker_config_by_id_with_http_info(self, config_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker config")], docker_worker_config_create_request : DockerWorkerConfigCreateRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """(Deprecated) update_docker_worker_config_by_id # noqa: E501
        DEPRECATED, DONT USE. Updates a docker worker configuration by id. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_docker_worker_config_by_id_with_http_info(config_id, docker_worker_config_create_request, async_req=True)
        >>> result = thread.get()
        :param config_id: ObjectId of the docker worker config (required)
        :type config_id: str
        :param docker_worker_config_create_request: (required)
        :type docker_worker_config_create_request: DockerWorkerConfigCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # surface the deprecation of this endpoint to callers at runtime
        warnings.warn("PUT /v1/docker/worker/config/{configId} is deprecated.", DeprecationWarning)
        # NOTE: locals() captures exactly the declared parameters plus the
        # ``kwargs`` dict (no other local has been bound yet besides the
        # warning call above, which binds nothing).
        _params = locals()
        # endpoint-specific parameters accepted by this method
        _all_params = [
            'config_id',
            'docker_worker_config_create_request'
        ]
        # generic per-request options understood by every generated endpoint
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_docker_worker_config_by_id" % _key
                )
            _params[_key] = _val
        # drop the kwargs container itself; its entries were merged above
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['config_id']:
            _path_params['configId'] = _params['config_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['docker_worker_config_create_request'] is not None:
            _body_params = _params['docker_worker_config_create_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # empty map: this endpoint returns no response body to deserialize
        _response_types_map = {}
        # delegate to the shared ApiClient; returns synchronously, or a
        # request thread when async_req is set
        return self.api_client.call_api(
            '/v1/docker/worker/config/{configId}', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def update_docker_worker_registry_entry_by_id(self, worker_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker")], update_docker_worker_registry_entry_request : UpdateDockerWorkerRegistryEntryRequest, **kwargs) -> None: # noqa: E501
"""update_docker_worker_registry_entry_by_id # noqa: E501
Updates the worker status by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_docker_worker_registry_entry_by_id(worker_id, update_docker_worker_registry_entry_request, async_req=True)
>>> result = thread.get()
:param worker_id: ObjectId of the docker worker (required)
:type worker_id: str
:param update_docker_worker_registry_entry_request: (required)
:type update_docker_worker_registry_entry_request: UpdateDockerWorkerRegistryEntryRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the update_docker_worker_registry_entry_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.update_docker_worker_registry_entry_by_id_with_http_info(worker_id, update_docker_worker_registry_entry_request, **kwargs) # noqa: E501
    @validate_arguments
    def update_docker_worker_registry_entry_by_id_with_http_info(self, worker_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker")], update_docker_worker_registry_entry_request : UpdateDockerWorkerRegistryEntryRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """update_docker_worker_registry_entry_by_id # noqa: E501
        Updates the worker status by id. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_docker_worker_registry_entry_by_id_with_http_info(worker_id, update_docker_worker_registry_entry_request, async_req=True)
        >>> result = thread.get()
        :param worker_id: ObjectId of the docker worker (required)
        :type worker_id: str
        :param update_docker_worker_registry_entry_request: (required)
        :type update_docker_worker_registry_entry_request: UpdateDockerWorkerRegistryEntryRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # NOTE: locals() is taken as the very first statement so it captures
        # exactly the declared parameters plus the ``kwargs`` dict.
        _params = locals()
        # endpoint-specific parameters accepted by this method
        _all_params = [
            'worker_id',
            'update_docker_worker_registry_entry_request'
        ]
        # generic per-request options understood by every generated endpoint
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_docker_worker_registry_entry_by_id" % _key
                )
            _params[_key] = _val
        # drop the kwargs container itself; its entries were merged above
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['worker_id']:
            _path_params['workerId'] = _params['worker_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['update_docker_worker_registry_entry_request'] is not None:
            _body_params = _params['update_docker_worker_registry_entry_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # empty map: this endpoint returns no response body to deserialize
        _response_types_map = {}
        # delegate to the shared ApiClient; returns synchronously, or a
        # request thread when async_req is set
        return self.api_client.call_api(
            '/v1/docker/worker/{workerId}', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def update_scheduled_docker_run_state_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], worker_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker")], scheduled_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker run")], docker_run_scheduled_update_request : DockerRunScheduledUpdateRequest, **kwargs) -> None: # noqa: E501
"""update_scheduled_docker_run_state_by_id # noqa: E501
Update the state of a scheduled run. This will fail if the state of the scheduled run is LOCKED. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_scheduled_docker_run_state_by_id(dataset_id, worker_id, scheduled_id, docker_run_scheduled_update_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param worker_id: ObjectId of the docker worker (required)
:type worker_id: str
:param scheduled_id: ObjectId of the docker worker run (required)
:type scheduled_id: str
:param docker_run_scheduled_update_request: (required)
:type docker_run_scheduled_update_request: DockerRunScheduledUpdateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the update_scheduled_docker_run_state_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.update_scheduled_docker_run_state_by_id_with_http_info(dataset_id, worker_id, scheduled_id, docker_run_scheduled_update_request, **kwargs) # noqa: E501
    @validate_arguments
    def update_scheduled_docker_run_state_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], worker_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker")], scheduled_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the docker worker run")], docker_run_scheduled_update_request : DockerRunScheduledUpdateRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """update_scheduled_docker_run_state_by_id # noqa: E501
        Update the state of a scheduled run. This will fail if the state of the scheduled run is LOCKED. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_scheduled_docker_run_state_by_id_with_http_info(dataset_id, worker_id, scheduled_id, docker_run_scheduled_update_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param worker_id: ObjectId of the docker worker (required)
        :type worker_id: str
        :param scheduled_id: ObjectId of the docker worker run (required)
        :type scheduled_id: str
        :param docker_run_scheduled_update_request: (required)
        :type docker_run_scheduled_update_request: DockerRunScheduledUpdateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # NOTE: locals() is taken as the very first statement so it captures
        # exactly the declared parameters plus the ``kwargs`` dict.
        _params = locals()
        # endpoint-specific parameters accepted by this method
        _all_params = [
            'dataset_id',
            'worker_id',
            'scheduled_id',
            'docker_run_scheduled_update_request'
        ]
        # generic per-request options understood by every generated endpoint
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_scheduled_docker_run_state_by_id" % _key
                )
            _params[_key] = _val
        # drop the kwargs container itself; its entries were merged above
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['worker_id']:
            _path_params['workerId'] = _params['worker_id']
        if _params['scheduled_id']:
            _path_params['scheduledId'] = _params['scheduled_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['docker_run_scheduled_update_request'] is not None:
            _body_params = _params['docker_run_scheduled_update_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # empty map: this endpoint returns no response body to deserialize
        _response_types_map = {}
        # delegate to the shared ApiClient; returns synchronously, or a
        # request thread when async_req is set
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/docker/worker/{workerId}/schedule/{scheduledId}', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 276,374 | 47.42737 | 848 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/embeddings2d_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, constr, validator
from typing import List
from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse
from lightly.openapi_generated.swagger_client.models.embedding2d_create_request import Embedding2dCreateRequest
from lightly.openapi_generated.swagger_client.models.embedding2d_data import Embedding2dData
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class Embeddings2dApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def create_embeddings2d_by_embedding_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], embedding2d_create_request : Embedding2dCreateRequest, **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_embeddings2d_by_embedding_id # noqa: E501
Create a new 2d embedding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_embeddings2d_by_embedding_id(dataset_id, embedding_id, embedding2d_create_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param embedding_id: ObjectId of the embedding (required)
:type embedding_id: str
:param embedding2d_create_request: (required)
:type embedding2d_create_request: Embedding2dCreateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_embeddings2d_by_embedding_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_embeddings2d_by_embedding_id_with_http_info(dataset_id, embedding_id, embedding2d_create_request, **kwargs) # noqa: E501
    @validate_arguments
    def create_embeddings2d_by_embedding_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], embedding2d_create_request : Embedding2dCreateRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """create_embeddings2d_by_embedding_id # noqa: E501
        Create a new 2d embedding # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_embeddings2d_by_embedding_id_with_http_info(dataset_id, embedding_id, embedding2d_create_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param embedding_id: ObjectId of the embedding (required)
        :type embedding_id: str
        :param embedding2d_create_request: (required)
        :type embedding2d_create_request: Embedding2dCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() is taken as the very first statement so it captures
        # exactly the declared parameters plus the ``kwargs`` dict.
        _params = locals()
        # endpoint-specific parameters accepted by this method
        _all_params = [
            'dataset_id',
            'embedding_id',
            'embedding2d_create_request'
        ]
        # generic per-request options understood by every generated endpoint
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_embeddings2d_by_embedding_id" % _key
                )
            _params[_key] = _val
        # drop the kwargs container itself; its entries were merged above
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['embedding_id']:
            _path_params['embeddingId'] = _params['embedding_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['embedding2d_create_request'] is not None:
            _body_params = _params['embedding2d_create_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # map HTTP status codes to the model used to deserialize each response
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate to the shared ApiClient; returns synchronously, or a
        # request thread when async_req is set
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/embeddings/{embeddingId}/2d', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_embedding2d_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], embedding2d_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the 2d embedding")], **kwargs) -> Embedding2dData: # noqa: E501
"""get_embedding2d_by_id # noqa: E501
Get the 2d embeddings by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_embedding2d_by_id(dataset_id, embedding_id, embedding2d_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param embedding_id: ObjectId of the embedding (required)
:type embedding_id: str
:param embedding2d_id: ObjectId of the 2d embedding (required)
:type embedding2d_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: Embedding2dData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_embedding2d_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_embedding2d_by_id_with_http_info(dataset_id, embedding_id, embedding2d_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_embedding2d_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], embedding2d_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the 2d embedding")], **kwargs) -> ApiResponse: # noqa: E501
        """get_embedding2d_by_id # noqa: E501
        Get the 2d embeddings by id # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_embedding2d_by_id_with_http_info(dataset_id, embedding_id, embedding2d_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param embedding_id: ObjectId of the embedding (required)
        :type embedding_id: str
        :param embedding2d_id: ObjectId of the 2d embedding (required)
        :type embedding2d_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(Embedding2dData, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must be the first statement so _params captures exactly
        # the declared arguments plus the 'kwargs' dict and nothing else.
        _params = locals()
        _all_params = [
            'dataset_id',
            'embedding_id',
            'embedding2d_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_embedding2d_by_id" % _key
                )
            _params[_key] = _val
        # kwargs entries were flattened into _params above; drop the nested dict
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['embedding_id']:
            _path_params['embeddingId'] = _params['embedding_id']
        if _params['embedding2d_id']:
            _path_params['embedding2dId'] = _params['embedding2d_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # maps HTTP status codes to the response model name the client deserializes
        _response_types_map = {
            '200': "Embedding2dData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/embeddings/{embeddingId}/2d/{embedding2dId}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_embeddings2d_by_embedding_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], **kwargs) -> List[Embedding2dData]: # noqa: E501
"""get_embeddings2d_by_embedding_id # noqa: E501
Get all 2d embeddings of an embedding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_embeddings2d_by_embedding_id(dataset_id, embedding_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param embedding_id: ObjectId of the embedding (required)
:type embedding_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[Embedding2dData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_embeddings2d_by_embedding_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_embeddings2d_by_embedding_id_with_http_info(dataset_id, embedding_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_embeddings2d_by_embedding_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], **kwargs) -> ApiResponse: # noqa: E501
        """get_embeddings2d_by_embedding_id # noqa: E501
        Get all 2d embeddings of an embedding # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_embeddings2d_by_embedding_id_with_http_info(dataset_id, embedding_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param embedding_id: ObjectId of the embedding (required)
        :type embedding_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[Embedding2dData], status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must be the first statement so _params captures exactly
        # the declared arguments plus the 'kwargs' dict and nothing else.
        _params = locals()
        _all_params = [
            'dataset_id',
            'embedding_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_embeddings2d_by_embedding_id" % _key
                )
            _params[_key] = _val
        # kwargs entries were flattened into _params above; drop the nested dict
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['embedding_id']:
            _path_params['embeddingId'] = _params['embedding_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # maps HTTP status codes to the response model name the client deserializes
        _response_types_map = {
            '200': "List[Embedding2dData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/embeddings/{embeddingId}/2d', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 24,023 | 44.673004 | 391 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/embeddings_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, StrictStr, constr, validator
from typing import List, Optional
from lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData
from lightly.openapi_generated.swagger_client.models.embedding_data import EmbeddingData
from lightly.openapi_generated.swagger_client.models.set_embeddings_is_processed_flag_by_id_body_request import SetEmbeddingsIsProcessedFlagByIdBodyRequest
from lightly.openapi_generated.swagger_client.models.trigger2d_embedding_job_request import Trigger2dEmbeddingJobRequest
from lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class EmbeddingsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def delete_embedding_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], **kwargs) -> None: # noqa: E501
"""delete_embedding_by_id # noqa: E501
Deletes a embedding entry by id. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_embedding_by_id(dataset_id, embedding_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param embedding_id: ObjectId of the embedding (required)
:type embedding_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the delete_embedding_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.delete_embedding_by_id_with_http_info(dataset_id, embedding_id, **kwargs) # noqa: E501
    @validate_arguments
    def delete_embedding_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], **kwargs) -> ApiResponse: # noqa: E501
        """delete_embedding_by_id # noqa: E501
        Deletes a embedding entry by id. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_embedding_by_id_with_http_info(dataset_id, embedding_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param embedding_id: ObjectId of the embedding (required)
        :type embedding_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # NOTE: locals() must be the first statement so _params captures exactly
        # the declared arguments plus the 'kwargs' dict and nothing else.
        _params = locals()
        _all_params = [
            'dataset_id',
            'embedding_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_embedding_by_id" % _key
                )
            _params[_key] = _val
        # kwargs entries were flattened into _params above; drop the nested dict
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['embedding_id']:
            _path_params['embeddingId'] = _params['embedding_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # empty map: the DELETE endpoint returns no body to deserialize
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/embeddings/{embeddingId}', 'DELETE',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_embeddings_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> List[DatasetEmbeddingData]: # noqa: E501
"""get_embeddings_by_dataset_id # noqa: E501
Get all annotations of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_embeddings_by_dataset_id(dataset_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[DatasetEmbeddingData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_embeddings_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_embeddings_by_dataset_id_with_http_info(dataset_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_embeddings_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> ApiResponse: # noqa: E501
        """get_embeddings_by_dataset_id # noqa: E501
        Get all embeddings of a dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_embeddings_by_dataset_id_with_http_info(dataset_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[DatasetEmbeddingData], status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must be the first statement so _params captures exactly
        # the declared arguments plus the 'kwargs' dict and nothing else.
        _params = locals()
        _all_params = [
            'dataset_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_embeddings_by_dataset_id" % _key
                )
            _params[_key] = _val
        # kwargs entries were flattened into _params above; drop the nested dict
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # maps HTTP status codes to the response model name the client deserializes
        _response_types_map = {
            '200': "List[DatasetEmbeddingData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/embeddings', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_embeddings_by_sample_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], mode : Annotated[Optional[StrictStr], Field(description="if we want everything (full) or just the summaries")] = None, **kwargs) -> List[EmbeddingData]: # noqa: E501
"""get_embeddings_by_sample_id # noqa: E501
Get all embeddings of a datasets sample # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_embeddings_by_sample_id(dataset_id, sample_id, mode, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param sample_id: ObjectId of the sample (required)
:type sample_id: str
:param mode: if we want everything (full) or just the summaries
:type mode: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[EmbeddingData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_embeddings_by_sample_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_embeddings_by_sample_id_with_http_info(dataset_id, sample_id, mode, **kwargs) # noqa: E501
    @validate_arguments
    def get_embeddings_by_sample_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], mode : Annotated[Optional[StrictStr], Field(description="if we want everything (full) or just the summaries")] = None, **kwargs) -> ApiResponse: # noqa: E501
        """get_embeddings_by_sample_id # noqa: E501
        Get all embeddings of a datasets sample # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_embeddings_by_sample_id_with_http_info(dataset_id, sample_id, mode, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param sample_id: ObjectId of the sample (required)
        :type sample_id: str
        :param mode: if we want everything (full) or just the summaries
        :type mode: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[EmbeddingData], status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must be the first statement so _params captures exactly
        # the declared arguments plus the 'kwargs' dict and nothing else.
        _params = locals()
        _all_params = [
            'dataset_id',
            'sample_id',
            'mode'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_embeddings_by_sample_id" % _key
                )
            _params[_key] = _val
        # kwargs entries were flattened into _params above; drop the nested dict
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['sample_id']:
            _path_params['sampleId'] = _params['sample_id']
        # process the query parameters
        _query_params = []
        if _params.get('mode') is not None: # noqa: E501
            # enum members serialize via .value; plain strings pass through as-is
            _query_params.append((
                'mode',
                _params['mode'].value if hasattr(_params['mode'], 'value') else _params['mode']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # maps HTTP status codes to the response model name the client deserializes
        _response_types_map = {
            '200': "List[EmbeddingData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/users/datasets/{datasetId}/samples/{sampleId}/embeddings', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_embeddings_csv_read_url_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], **kwargs) -> str: # noqa: E501
"""get_embeddings_csv_read_url_by_id # noqa: E501
Get the url of a specific embeddings CSV # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_embeddings_csv_read_url_by_id(dataset_id, embedding_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param embedding_id: ObjectId of the embedding (required)
:type embedding_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_embeddings_csv_read_url_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_embeddings_csv_read_url_by_id_with_http_info(dataset_id, embedding_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_embeddings_csv_read_url_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_embeddings_csv_read_url_by_id  # noqa: E501

        Get the url of a specific embeddings CSV  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_embeddings_csv_read_url_by_id_with_http_info(dataset_id, embedding_id, async_req=True)
        >>> result = thread.get()

        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param embedding_id: ObjectId of the embedding (required)
        :type embedding_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """

        # Snapshot the call arguments. This must run before any other local
        # variable is created so that only the method parameters (plus
        # `kwargs`) are captured in the mapping.
        _params = locals()

        # Whitelist of accepted keyword arguments: the spec parameters plus
        # the framework-level options shared by every endpoint.
        _all_params = [
            'dataset_id',
            'embedding_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_embeddings_csv_read_url_by_id" % _key
                )
            # Flatten accepted kwargs into the params mapping for uniform
            # access below.
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        # NOTE: truthiness check — an empty string is skipped as well as None.
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']

        if _params['embedding_id']:
            _path_params['embeddingId'] = _params['embedding_id']

        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # Map HTTP status codes to the type the response body is
        # deserialized into.
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/embeddings/{embeddingId}/readCSVUrl', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_embeddings_csv_write_url_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], name : Annotated[Optional[StrictStr], Field(description="the sampling requests name to create a signed url for")] = None, **kwargs) -> WriteCSVUrlData: # noqa: E501
"""get_embeddings_csv_write_url_by_id # noqa: E501
Get the signed url to upload an CSVembedding to for a specific dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_embeddings_csv_write_url_by_id(dataset_id, name, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param name: the sampling requests name to create a signed url for
:type name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: WriteCSVUrlData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_embeddings_csv_write_url_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_embeddings_csv_write_url_by_id_with_http_info(dataset_id, name, **kwargs) # noqa: E501
    @validate_arguments
    def get_embeddings_csv_write_url_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], name : Annotated[Optional[StrictStr], Field(description="the sampling requests name to create a signed url for")] = None, **kwargs) -> ApiResponse:  # noqa: E501
        """get_embeddings_csv_write_url_by_id  # noqa: E501

        Get the signed url to upload an CSVembedding to for a specific dataset  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_embeddings_csv_write_url_by_id_with_http_info(dataset_id, name, async_req=True)
        >>> result = thread.get()

        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param name: the sampling requests name to create a signed url for
        :type name: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(WriteCSVUrlData, status_code(int), headers(HTTPHeaderDict))
        """

        # Snapshot the call arguments. This must run before any other local
        # variable is created so that only the method parameters (plus
        # `kwargs`) are captured in the mapping.
        _params = locals()

        # Whitelist of accepted keyword arguments: the spec parameters plus
        # the framework-level options shared by every endpoint.
        _all_params = [
            'dataset_id',
            'name'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_embeddings_csv_write_url_by_id" % _key
                )
            # Flatten accepted kwargs into the params mapping for uniform
            # access below.
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        # NOTE: truthiness check — an empty string is skipped as well as None.
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']

        # process the query parameters
        _query_params = []
        if _params.get('name') is not None:  # noqa: E501
            # Enum-like values are unwrapped to their raw `.value` before
            # being serialized into the query string.
            _query_params.append((
                'name',
                _params['name'].value if hasattr(_params['name'], 'value') else _params['name']
            ))

        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # Map HTTP status codes to the type the response body is
        # deserialized into.
        _response_types_map = {
            '200': "WriteCSVUrlData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/embeddings/writeCSVUrl', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def set_embeddings_is_processed_flag_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], set_embeddings_is_processed_flag_by_id_body_request : SetEmbeddingsIsProcessedFlagByIdBodyRequest, **kwargs) -> None: # noqa: E501
"""set_embeddings_is_processed_flag_by_id # noqa: E501
Sets the isProcessed flag of the specified embedding # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.set_embeddings_is_processed_flag_by_id(dataset_id, embedding_id, set_embeddings_is_processed_flag_by_id_body_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param embedding_id: ObjectId of the embedding (required)
:type embedding_id: str
:param set_embeddings_is_processed_flag_by_id_body_request: (required)
:type set_embeddings_is_processed_flag_by_id_body_request: SetEmbeddingsIsProcessedFlagByIdBodyRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the set_embeddings_is_processed_flag_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.set_embeddings_is_processed_flag_by_id_with_http_info(dataset_id, embedding_id, set_embeddings_is_processed_flag_by_id_body_request, **kwargs) # noqa: E501
    @validate_arguments
    def set_embeddings_is_processed_flag_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], set_embeddings_is_processed_flag_by_id_body_request : SetEmbeddingsIsProcessedFlagByIdBodyRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """set_embeddings_is_processed_flag_by_id  # noqa: E501

        Sets the isProcessed flag of the specified embedding  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.set_embeddings_is_processed_flag_by_id_with_http_info(dataset_id, embedding_id, set_embeddings_is_processed_flag_by_id_body_request, async_req=True)
        >>> result = thread.get()

        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param embedding_id: ObjectId of the embedding (required)
        :type embedding_id: str
        :param set_embeddings_is_processed_flag_by_id_body_request: (required)
        :type set_embeddings_is_processed_flag_by_id_body_request: SetEmbeddingsIsProcessedFlagByIdBodyRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """

        # Snapshot the call arguments. This must run before any other local
        # variable is created so that only the method parameters (plus
        # `kwargs`) are captured in the mapping.
        _params = locals()

        # Whitelist of accepted keyword arguments: the spec parameters plus
        # the framework-level options shared by every endpoint.
        _all_params = [
            'dataset_id',
            'embedding_id',
            'set_embeddings_is_processed_flag_by_id_body_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method set_embeddings_is_processed_flag_by_id" % _key
                )
            # Flatten accepted kwargs into the params mapping for uniform
            # access below.
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        # NOTE: truthiness check — an empty string is skipped as well as None.
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']

        if _params['embedding_id']:
            _path_params['embeddingId'] = _params['embedding_id']

        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['set_embeddings_is_processed_flag_by_id_body_request'] is not None:
            _body_params = _params['set_embeddings_is_processed_flag_by_id_body_request']

        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # set the HTTP header `Content-Type`
        # An explicit `_content_type` kwarg overrides content negotiation.
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
                _header_params['Content-Type'] = _content_types_list

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # Empty map: the endpoint returns no body to deserialize.
        _response_types_map = {}

        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/embeddings/{embeddingId}/isProcessed', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def trigger2d_embeddings_job(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], trigger2d_embedding_job_request : Trigger2dEmbeddingJobRequest, **kwargs) -> None: # noqa: E501
"""trigger2d_embeddings_job # noqa: E501
Trigger job to get 2d embeddings from embeddings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.trigger2d_embeddings_job(dataset_id, embedding_id, trigger2d_embedding_job_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param embedding_id: ObjectId of the embedding (required)
:type embedding_id: str
:param trigger2d_embedding_job_request: (required)
:type trigger2d_embedding_job_request: Trigger2dEmbeddingJobRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the trigger2d_embeddings_job_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.trigger2d_embeddings_job_with_http_info(dataset_id, embedding_id, trigger2d_embedding_job_request, **kwargs) # noqa: E501
    @validate_arguments
    def trigger2d_embeddings_job_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], trigger2d_embedding_job_request : Trigger2dEmbeddingJobRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """trigger2d_embeddings_job  # noqa: E501

        Trigger job to get 2d embeddings from embeddings  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.trigger2d_embeddings_job_with_http_info(dataset_id, embedding_id, trigger2d_embedding_job_request, async_req=True)
        >>> result = thread.get()

        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param embedding_id: ObjectId of the embedding (required)
        :type embedding_id: str
        :param trigger2d_embedding_job_request: (required)
        :type trigger2d_embedding_job_request: Trigger2dEmbeddingJobRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """

        # Snapshot the call arguments. This must run before any other local
        # variable is created so that only the method parameters (plus
        # `kwargs`) are captured in the mapping.
        _params = locals()

        # Whitelist of accepted keyword arguments: the spec parameters plus
        # the framework-level options shared by every endpoint.
        _all_params = [
            'dataset_id',
            'embedding_id',
            'trigger2d_embedding_job_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method trigger2d_embeddings_job" % _key
                )
            # Flatten accepted kwargs into the params mapping for uniform
            # access below.
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        # NOTE: truthiness check — an empty string is skipped as well as None.
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']

        if _params['embedding_id']:
            _path_params['embeddingId'] = _params['embedding_id']

        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['trigger2d_embedding_job_request'] is not None:
            _body_params = _params['trigger2d_embedding_job_request']

        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # set the HTTP header `Content-Type`
        # An explicit `_content_type` kwarg overrides content negotiation.
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
                _header_params['Content-Type'] = _content_types_list

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # Empty map: the endpoint returns no body to deserialize.
        _response_types_map = {}

        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/embeddings/{embeddingId}/trigger2dEmbeddingsJob', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 52,407 | 45.502218 | 405 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/jobs_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, StrictStr
from typing import List
from lightly.openapi_generated.swagger_client.models.job_status_data import JobStatusData
from lightly.openapi_generated.swagger_client.models.jobs_data import JobsData
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class JobsApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to the process-wide default client so that ad-hoc usage
        # (``JobsApi()`` with no arguments) shares configuration and auth
        # state with the rest of the application.
        if api_client is None:
            api_client = ApiClient.get_default()
        self.api_client = api_client

    @validate_arguments
    def get_job_status_by_id(self, job_id : Annotated[StrictStr, Field(..., description="id of the job")], **kwargs) -> JobStatusData:  # noqa: E501
        """get_job_status_by_id  # noqa: E501

        Get status of a specific job  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_job_status_by_id(job_id, async_req=True)
        >>> result = thread.get()

        :param job_id: id of the job (required)
        :type job_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: JobStatusData
        """
        # Shorthand wrapper: always return only the deserialized payload;
        # `_preload_content` is meaningless here and rejected explicitly.
        kwargs['_return_http_data_only'] = True
        if '_preload_content' in kwargs:
            raise ValueError("Error! Please call the get_job_status_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
        return self.get_job_status_by_id_with_http_info(job_id, **kwargs)  # noqa: E501

    @validate_arguments
    def get_job_status_by_id_with_http_info(self, job_id : Annotated[StrictStr, Field(..., description="id of the job")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_job_status_by_id  # noqa: E501

        Get status of a specific job  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_job_status_by_id_with_http_info(job_id, async_req=True)
        >>> result = thread.get()

        :param job_id: id of the job (required)
        :type job_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(JobStatusData, status_code(int), headers(HTTPHeaderDict))
        """

        # Snapshot the call arguments. This must run before any other local
        # variable is created so that only the method parameters (plus
        # `kwargs`) are captured in the mapping.
        _params = locals()

        # Whitelist of accepted keyword arguments: the spec parameters plus
        # the framework-level options shared by every endpoint.
        _all_params = [
            'job_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_job_status_by_id" % _key
                )
            # Flatten accepted kwargs into the params mapping for uniform
            # access below.
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        # NOTE: truthiness check — an empty string is skipped as well as None.
        _path_params = {}
        if _params['job_id']:
            _path_params['jobId'] = _params['job_id']

        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # Map HTTP status codes to the type the response body is
        # deserialized into.
        _response_types_map = {
            '200': "JobStatusData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/jobs/{jobId}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))

    @validate_arguments
    def get_jobs(self, **kwargs) -> List[JobsData]:  # noqa: E501
        """get_jobs  # noqa: E501

        Get all jobs you have created  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_jobs(async_req=True)
        >>> result = thread.get()

        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: List[JobsData]
        """
        # Shorthand wrapper: always return only the deserialized payload;
        # `_preload_content` is meaningless here and rejected explicitly.
        kwargs['_return_http_data_only'] = True
        if '_preload_content' in kwargs:
            raise ValueError("Error! Please call the get_jobs_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
        return self.get_jobs_with_http_info(**kwargs)  # noqa: E501

    @validate_arguments
    def get_jobs_with_http_info(self, **kwargs) -> ApiResponse:  # noqa: E501
        """get_jobs  # noqa: E501

        Get all jobs you have created  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_jobs_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[JobsData], status_code(int), headers(HTTPHeaderDict))
        """

        # Snapshot the call arguments. This must run before any other local
        # variable is created so that only the method parameters (plus
        # `kwargs`) are captured in the mapping.
        _params = locals()

        # No spec-level parameters for this endpoint; only the shared
        # framework-level options are accepted.
        _all_params = [
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_jobs" % _key
                )
            # Flatten accepted kwargs into the params mapping for uniform
            # access below.
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        _path_params = {}

        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # Map HTTP status codes to the type the response body is
        # deserialized into.
        _response_types_map = {
            '200': "List[JobsData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/jobs', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 13,183 | 39.318043 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/mappings_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, StrictStr, constr, validator
from typing import List
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class MappingsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def get_sample_mappings_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], field : Annotated[StrictStr, Field(..., description="the field to return as the value")], **kwargs) -> List[str]: # noqa: E501
"""get_sample_mappings_by_dataset_id # noqa: E501
Get all samples of a dataset as a list. List index is the index of the sample2bitmask mapping and the value is the 'field' you wanted (e.g _id, fileName) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sample_mappings_by_dataset_id(dataset_id, field, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param field: the field to return as the value (required)
:type field: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[str]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_sample_mappings_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_sample_mappings_by_dataset_id_with_http_info(dataset_id, field, **kwargs) # noqa: E501
    @validate_arguments
    def get_sample_mappings_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], field : Annotated[StrictStr, Field(..., description="the field to return as the value")], **kwargs) -> ApiResponse: # noqa: E501
        """get_sample_mappings_by_dataset_id # noqa: E501
        Get all samples of a dataset as a list. List index is the index of the sample2bitmask mapping and the value is the 'field' you wanted (e.g _id, fileName) # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_sample_mappings_by_dataset_id_with_http_info(dataset_id, field, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param field: the field to return as the value (required)
        :type field: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[str], status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot of named arguments; also contains the raw 'kwargs' dict,
        # which is validated and flattened into _params below.
        _params = locals()
        _all_params = [
            'dataset_id',
            'field'
        ]
        # Generic per-request options accepted by every generated endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_sample_mappings_by_dataset_id" % _key
                )
            # Promote each accepted option so it is addressable by name.
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): truthiness check — a falsy value (e.g. "") would skip
        # path substitution; generated code relies on upstream validation.
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        if _params.get('field') is not None: # noqa: E501
            _query_params.append((
                'field',
                # Enum-like objects are serialized via their .value attribute.
                _params['field'].value if hasattr(_params['field'], 'value') else _params['field']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # Maps HTTP status codes to the model used to deserialize each body.
        _response_types_map = {
            '200': "List[str]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/mappings', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 8,547 | 41.527363 | 288 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/meta_data_configurations_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, constr, validator
from typing import List
from lightly.openapi_generated.swagger_client.models.configuration_data import ConfigurationData
from lightly.openapi_generated.swagger_client.models.configuration_set_request import ConfigurationSetRequest
from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class MetaDataConfigurationsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def create_meta_data_configuration(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], configuration_set_request : ConfigurationSetRequest, **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_meta_data_configuration # noqa: E501
Create a new metadata configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_meta_data_configuration(dataset_id, configuration_set_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param configuration_set_request: (required)
:type configuration_set_request: ConfigurationSetRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_meta_data_configuration_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_meta_data_configuration_with_http_info(dataset_id, configuration_set_request, **kwargs) # noqa: E501
    @validate_arguments
    def create_meta_data_configuration_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], configuration_set_request : ConfigurationSetRequest, **kwargs) -> ApiResponse: # noqa: E501
        """create_meta_data_configuration # noqa: E501
        Create a new metadata configuration # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_meta_data_configuration_with_http_info(dataset_id, configuration_set_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param configuration_set_request: (required)
        :type configuration_set_request: ConfigurationSetRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot of named arguments; also contains the raw 'kwargs' dict,
        # which is validated and flattened into _params below.
        _params = locals()
        _all_params = [
            'dataset_id',
            'configuration_set_request'
        ]
        # Generic per-request options accepted by every generated endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_meta_data_configuration" % _key
                )
            # Promote each accepted option so it is addressable by name.
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): truthiness check — a falsy value (e.g. "") would skip
        # path substitution; generated code relies on upstream validation.
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['configuration_set_request'] is not None:
            _body_params = _params['configuration_set_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        # An explicit _content_type overrides the negotiated value.
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # Maps HTTP status codes to the model used to deserialize each body.
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/configuration/metadata', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_meta_data_configuration_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], configuration_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the metadata configuration")], **kwargs) -> ConfigurationData: # noqa: E501
"""get_meta_data_configuration_by_id # noqa: E501
Get a specific metadata configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_meta_data_configuration_by_id(dataset_id, configuration_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param configuration_id: ObjectId of the metadata configuration (required)
:type configuration_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ConfigurationData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_meta_data_configuration_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_meta_data_configuration_by_id_with_http_info(dataset_id, configuration_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_meta_data_configuration_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], configuration_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the metadata configuration")], **kwargs) -> ApiResponse: # noqa: E501
        """get_meta_data_configuration_by_id # noqa: E501
        Get a specific metadata configuration # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_meta_data_configuration_by_id_with_http_info(dataset_id, configuration_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param configuration_id: ObjectId of the metadata configuration (required)
        :type configuration_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(ConfigurationData, status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot of named arguments; also contains the raw 'kwargs' dict,
        # which is validated and flattened into _params below.
        _params = locals()
        _all_params = [
            'dataset_id',
            'configuration_id'
        ]
        # Generic per-request options accepted by every generated endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_meta_data_configuration_by_id" % _key
                )
            # Promote each accepted option so it is addressable by name.
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): truthiness checks — a falsy value (e.g. "") would skip
        # path substitution; generated code relies on upstream validation.
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['configuration_id']:
            _path_params['configurationId'] = _params['configuration_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # Maps HTTP status codes to the model used to deserialize each body.
        _response_types_map = {
            '200': "ConfigurationData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/configuration/metadata/{configurationId}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_meta_data_configurations(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> List[ConfigurationData]: # noqa: E501
"""get_meta_data_configurations # noqa: E501
Get the all metadata configurations that exist for a user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_meta_data_configurations(dataset_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[ConfigurationData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_meta_data_configurations_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_meta_data_configurations_with_http_info(dataset_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_meta_data_configurations_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> ApiResponse: # noqa: E501
        """get_meta_data_configurations # noqa: E501
        Get the all metadata configurations that exist for a user # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_meta_data_configurations_with_http_info(dataset_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[ConfigurationData], status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot of named arguments; also contains the raw 'kwargs' dict,
        # which is validated and flattened into _params below.
        _params = locals()
        _all_params = [
            'dataset_id'
        ]
        # Generic per-request options accepted by every generated endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_meta_data_configurations" % _key
                )
            # Promote each accepted option so it is addressable by name.
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): truthiness check — a falsy value (e.g. "") would skip
        # path substitution; generated code relies on upstream validation.
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # Maps HTTP status codes to the model used to deserialize each body.
        _response_types_map = {
            '200': "List[ConfigurationData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/configuration/metadata', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def update_meta_data_configuration_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], configuration_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the metadata configuration")], configuration_set_request : ConfigurationSetRequest, **kwargs) -> None: # noqa: E501
"""update_meta_data_configuration_by_id # noqa: E501
update a specific metadata configuration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_meta_data_configuration_by_id(dataset_id, configuration_id, configuration_set_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param configuration_id: ObjectId of the metadata configuration (required)
:type configuration_id: str
:param configuration_set_request: (required)
:type configuration_set_request: ConfigurationSetRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the update_meta_data_configuration_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.update_meta_data_configuration_by_id_with_http_info(dataset_id, configuration_id, configuration_set_request, **kwargs) # noqa: E501
    @validate_arguments
    def update_meta_data_configuration_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], configuration_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the metadata configuration")], configuration_set_request : ConfigurationSetRequest, **kwargs) -> ApiResponse: # noqa: E501
        """update_meta_data_configuration_by_id # noqa: E501
        update a specific metadata configuration # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_meta_data_configuration_by_id_with_http_info(dataset_id, configuration_id, configuration_set_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param configuration_id: ObjectId of the metadata configuration (required)
        :type configuration_id: str
        :param configuration_set_request: (required)
        :type configuration_set_request: ConfigurationSetRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # Snapshot of named arguments; also contains the raw 'kwargs' dict,
        # which is validated and flattened into _params below.
        _params = locals()
        _all_params = [
            'dataset_id',
            'configuration_id',
            'configuration_set_request'
        ]
        # Generic per-request options accepted by every generated endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_meta_data_configuration_by_id" % _key
                )
            # Promote each accepted option so it is addressable by name.
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): truthiness checks — a falsy value (e.g. "") would skip
        # path substitution; generated code relies on upstream validation.
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['configuration_id']:
            _path_params['configurationId'] = _params['configuration_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['configuration_set_request'] is not None:
            _body_params = _params['configuration_set_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        # An explicit _content_type overrides the negotiated value.
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # Empty map: no body is deserialized for this endpoint (returns None).
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/configuration/metadata/{configurationId}', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 30,243 | 44.685801 | 371 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/predictions_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, conint, conlist, constr, validator
from typing import List, Optional
from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse
from lightly.openapi_generated.swagger_client.models.prediction_singleton import PredictionSingleton
from lightly.openapi_generated.swagger_client.models.prediction_task_schema import PredictionTaskSchema
from lightly.openapi_generated.swagger_client.models.prediction_task_schemas import PredictionTaskSchemas
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class PredictionsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def create_or_update_prediction_by_sample_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], prediction_singleton : conlist(PredictionSingleton), **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_or_update_prediction_by_sample_id # noqa: E501
Create/Update all the prediction singletons for a sampleId in the order/index of them being discovered # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_or_update_prediction_by_sample_id(dataset_id, sample_id, prediction_uuid_timestamp, prediction_singleton, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param sample_id: ObjectId of the sample (required)
:type sample_id: str
:param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
:type prediction_uuid_timestamp: int
:param prediction_singleton: (required)
:type prediction_singleton: List[PredictionSingleton]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_or_update_prediction_by_sample_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_or_update_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, prediction_singleton, **kwargs) # noqa: E501
    @validate_arguments
    def create_or_update_prediction_by_sample_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], prediction_singleton : conlist(PredictionSingleton), **kwargs) -> ApiResponse:  # noqa: E501
        """create_or_update_prediction_by_sample_id # noqa: E501
        Create/Update all the prediction singletons for a sampleId in the order/index of them being discovered # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_or_update_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, prediction_singleton, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param sample_id: ObjectId of the sample (required)
        :type sample_id: str
        :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
        :type prediction_uuid_timestamp: int
        :param prediction_singleton: (required)
        :type prediction_singleton: List[PredictionSingleton]
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() snapshots the call arguments; it must run before any
        # other local variable is bound so _params holds exactly the arguments.
        _params = locals()
        _all_params = [
            'dataset_id',
            'sample_id',
            'prediction_uuid_timestamp',
            'prediction_singleton'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_or_update_prediction_by_sample_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['sample_id']:
            _path_params['sampleId'] = _params['sample_id']
        # process the query parameters
        _query_params = []
        if _params.get('prediction_uuid_timestamp') is not None:  # noqa: E501
            _query_params.append((
                'predictionUUIDTimestamp',
                # enum members are serialized by their .value; plain ints pass through
                _params['prediction_uuid_timestamp'].value if hasattr(_params['prediction_uuid_timestamp'], 'value') else _params['prediction_uuid_timestamp']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['prediction_singleton'] is not None:
            _body_params = _params['prediction_singleton']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # maps HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/predictions/samples/{sampleId}', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def create_or_update_prediction_task_schema_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], prediction_task_schema : PredictionTaskSchema, **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_or_update_prediction_task_schema_by_dataset_id # noqa: E501
Creates/updates a prediction task schema with the task name # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_or_update_prediction_task_schema_by_dataset_id(dataset_id, prediction_uuid_timestamp, prediction_task_schema, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
:type prediction_uuid_timestamp: int
:param prediction_task_schema: (required)
:type prediction_task_schema: PredictionTaskSchema
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_or_update_prediction_task_schema_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_or_update_prediction_task_schema_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, prediction_task_schema, **kwargs) # noqa: E501
    @validate_arguments
    def create_or_update_prediction_task_schema_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], prediction_task_schema : PredictionTaskSchema, **kwargs) -> ApiResponse:  # noqa: E501
        """create_or_update_prediction_task_schema_by_dataset_id # noqa: E501
        Creates/updates a prediction task schema with the task name # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_or_update_prediction_task_schema_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, prediction_task_schema, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
        :type prediction_uuid_timestamp: int
        :param prediction_task_schema: (required)
        :type prediction_task_schema: PredictionTaskSchema
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() snapshots the call arguments; it must run before any
        # other local variable is bound so _params holds exactly the arguments.
        _params = locals()
        _all_params = [
            'dataset_id',
            'prediction_uuid_timestamp',
            'prediction_task_schema'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_or_update_prediction_task_schema_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        if _params.get('prediction_uuid_timestamp') is not None:  # noqa: E501
            _query_params.append((
                'predictionUUIDTimestamp',
                # enum members are serialized by their .value; plain ints pass through
                _params['prediction_uuid_timestamp'].value if hasattr(_params['prediction_uuid_timestamp'], 'value') else _params['prediction_uuid_timestamp']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['prediction_task_schema'] is not None:
            _body_params = _params['prediction_task_schema']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # maps HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/predictions/tasks', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_prediction_by_sample_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], **kwargs) -> List[PredictionSingleton]: # noqa: E501
"""get_prediction_by_sample_id # noqa: E501
Get all prediction singletons of a specific sample of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_prediction_by_sample_id(dataset_id, sample_id, prediction_uuid_timestamp, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param sample_id: ObjectId of the sample (required)
:type sample_id: str
:param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
:type prediction_uuid_timestamp: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[PredictionSingleton]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_prediction_by_sample_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, **kwargs) # noqa: E501
    @validate_arguments
    def get_prediction_by_sample_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_prediction_by_sample_id # noqa: E501
        Get all prediction singletons of a specific sample of a dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_prediction_by_sample_id_with_http_info(dataset_id, sample_id, prediction_uuid_timestamp, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param sample_id: ObjectId of the sample (required)
        :type sample_id: str
        :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
        :type prediction_uuid_timestamp: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[PredictionSingleton], status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() snapshots the call arguments; it must run before any
        # other local variable is bound so _params holds exactly the arguments.
        _params = locals()
        _all_params = [
            'dataset_id',
            'sample_id',
            'prediction_uuid_timestamp'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_prediction_by_sample_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['sample_id']:
            _path_params['sampleId'] = _params['sample_id']
        # process the query parameters
        _query_params = []
        if _params.get('prediction_uuid_timestamp') is not None:  # noqa: E501
            _query_params.append((
                'predictionUUIDTimestamp',
                # enum members are serialized by their .value; plain ints pass through
                _params['prediction_uuid_timestamp'].value if hasattr(_params['prediction_uuid_timestamp'], 'value') else _params['prediction_uuid_timestamp']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # maps HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '200': "List[PredictionSingleton]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/predictions/samples/{sampleId}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_prediction_task_schema_by_task_name(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], **kwargs) -> PredictionTaskSchema: # noqa: E501
"""get_prediction_task_schema_by_task_name # noqa: E501
Get a prediction task schemas named taskName for a datasetId # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_prediction_task_schema_by_task_name(dataset_id, prediction_uuid_timestamp, task_name, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
:type prediction_uuid_timestamp: int
:param task_name: The prediction task name for which one wants to list the predictions (required)
:type task_name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: PredictionTaskSchema
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_prediction_task_schema_by_task_name_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, **kwargs) # noqa: E501
    @validate_arguments
    def get_prediction_task_schema_by_task_name_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[constr(strict=True, min_length=1), Field(..., description="The prediction task name for which one wants to list the predictions")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_prediction_task_schema_by_task_name # noqa: E501
        Get a prediction task schemas named taskName for a datasetId # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_prediction_task_schema_by_task_name_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
        :type prediction_uuid_timestamp: int
        :param task_name: The prediction task name for which one wants to list the predictions (required)
        :type task_name: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(PredictionTaskSchema, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() snapshots the call arguments; it must run before any
        # other local variable is bound so _params holds exactly the arguments.
        _params = locals()
        _all_params = [
            'dataset_id',
            'prediction_uuid_timestamp',
            'task_name'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_prediction_task_schema_by_task_name" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['task_name']:
            _path_params['taskName'] = _params['task_name']
        # process the query parameters
        _query_params = []
        if _params.get('prediction_uuid_timestamp') is not None:  # noqa: E501
            _query_params.append((
                'predictionUUIDTimestamp',
                # enum members are serialized by their .value; plain ints pass through
                _params['prediction_uuid_timestamp'].value if hasattr(_params['prediction_uuid_timestamp'], 'value') else _params['prediction_uuid_timestamp']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # maps HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '200': "PredictionTaskSchema",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/predictions/tasks/{taskName}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_prediction_task_schemas_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> PredictionTaskSchemas: # noqa: E501
"""get_prediction_task_schemas_by_dataset_id # noqa: E501
Get list of all the prediction task schemas for a datasetId at a specific predictionUUIDTimestamp. If no predictionUUIDTimestamp is set, it defaults to the newest # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_prediction_task_schemas_by_dataset_id(dataset_id, prediction_uuid_timestamp, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset.
:type prediction_uuid_timestamp: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: PredictionTaskSchemas
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_prediction_task_schemas_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_prediction_task_schemas_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, **kwargs) # noqa: E501
    @validate_arguments
    def get_prediction_task_schemas_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[Optional[conint(strict=True, ge=0)], Field(description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")] = None, **kwargs) -> ApiResponse: # noqa: E501
        """get_prediction_task_schemas_by_dataset_id # noqa: E501

        Get list of all the prediction task schemas for a datasetId at a specific predictionUUIDTimestamp. If no predictionUUIDTimestamp is set, it defaults to the newest # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_prediction_task_schemas_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, async_req=True)
        >>> result = thread.get()

        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset.
        :type prediction_uuid_timestamp: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(PredictionTaskSchemas, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: must remain the first statement — locals() snapshots the named
        # arguments plus the raw **kwargs dict into one mapping.
        _params = locals()

        # Names accepted as keyword arguments for this endpoint.
        _all_params = [
            'dataset_id',
            'prediction_uuid_timestamp'
        ]
        # Generic transport/processing options accepted by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments: reject unknown kwargs, then flatten the
        # accepted ones into _params so they can be looked up uniformly.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_prediction_task_schemas_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']

        # process the query parameters
        _query_params = []
        if _params.get('prediction_uuid_timestamp') is not None:  # noqa: E501
            _query_params.append((
                'predictionUUIDTimestamp',
                # objects exposing a .value attribute (e.g. enum members) are
                # unwrapped; plain values pass through unchanged
                _params['prediction_uuid_timestamp'].value if hasattr(_params['prediction_uuid_timestamp'], 'value') else _params['prediction_uuid_timestamp']
            ))

        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))

        # process the form parameters
        _form_params = []
        _files = {}

        # process the body parameter (GET request — no body)
        _body_params = None

        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # map HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '200': "PredictionTaskSchemas",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/predictions/tasks', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_predictions_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> List[List]: # noqa: E501
"""get_predictions_by_dataset_id # noqa: E501
Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_predictions_by_dataset_id(dataset_id, prediction_uuid_timestamp, task_name, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
:type prediction_uuid_timestamp: int
:param task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name
:type task_name: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[List]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_predictions_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, **kwargs) # noqa: E501
    @validate_arguments
    def get_predictions_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], prediction_uuid_timestamp : Annotated[conint(strict=True, ge=0), Field(..., description="The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. ")], task_name : Annotated[Optional[constr(strict=True, min_length=1)], Field(description="If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name")] = None, **kwargs) -> ApiResponse: # noqa: E501
        """get_predictions_by_dataset_id # noqa: E501

        Get all prediction singletons of all samples of a dataset ordered by the sample mapping # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_predictions_by_dataset_id_with_http_info(dataset_id, prediction_uuid_timestamp, task_name, async_req=True)
        >>> result = thread.get()

        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param prediction_uuid_timestamp: The timestamp of when the actual predictions were created. This is used as a peg to version predictions. E.g one could upload predictions on day 1 and then create new predictions with an improved model on day 30. One can then upload the new predictions to the same dataset. (required)
        :type prediction_uuid_timestamp: int
        :param task_name: If provided, only gets all prediction singletons of all samples of a dataset that were yielded by a specific prediction task name
        :type task_name: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[List], status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: must remain the first statement — locals() snapshots the named
        # arguments plus the raw **kwargs dict into one mapping.
        _params = locals()

        # Names accepted as keyword arguments for this endpoint.
        _all_params = [
            'dataset_id',
            'prediction_uuid_timestamp',
            'task_name'
        ]
        # Generic transport/processing options accepted by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments: reject unknown kwargs, then flatten the
        # accepted ones into _params so they can be looked up uniformly.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_predictions_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']

        # process the query parameters
        _query_params = []
        if _params.get('prediction_uuid_timestamp') is not None:  # noqa: E501
            _query_params.append((
                'predictionUUIDTimestamp',
                # objects exposing a .value attribute (e.g. enum members) are
                # unwrapped; plain values pass through unchanged
                _params['prediction_uuid_timestamp'].value if hasattr(_params['prediction_uuid_timestamp'], 'value') else _params['prediction_uuid_timestamp']
            ))
        if _params.get('task_name') is not None:  # noqa: E501
            _query_params.append((
                'taskName',
                _params['task_name'].value if hasattr(_params['task_name'], 'value') else _params['task_name']
            ))

        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))

        # process the form parameters
        _form_params = []
        _files = {}

        # process the body parameter (GET request — no body)
        _body_params = None

        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # map HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '200': "List[List]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/predictions/samples', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 57,465 | 54.308951 | 789 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/quota_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class QuotaApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to the process-wide default client when none is provided.
        if api_client is None:
            api_client = ApiClient.get_default()
        self.api_client = api_client

    @validate_arguments
    def get_quota_maximum_dataset_size(self, **kwargs) -> str: # noqa: E501
        """get_quota_maximum_dataset_size # noqa: E501

        Get quota of the current user for the maximum dataset size # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_quota_maximum_dataset_size(async_req=True)
        >>> result = thread.get()

        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: str
        """
        # This convenience wrapper always returns only the deserialized body;
        # raw-response access must go through the *_with_http_info variant.
        kwargs['_return_http_data_only'] = True
        if '_preload_content' in kwargs:
            raise ValueError("Error! Please call the get_quota_maximum_dataset_size_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
        return self.get_quota_maximum_dataset_size_with_http_info(**kwargs)  # noqa: E501

    @validate_arguments
    def get_quota_maximum_dataset_size_with_http_info(self, **kwargs) -> ApiResponse: # noqa: E501
        """get_quota_maximum_dataset_size # noqa: E501

        Get quota of the current user for the maximum dataset size # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.get_quota_maximum_dataset_size_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: must remain the first statement — locals() snapshots the named
        # arguments plus the raw **kwargs dict into one mapping.
        _params = locals()

        # This endpoint takes no endpoint-specific parameters; only the
        # generic transport/processing options are accepted.
        _all_params = [
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments: reject unknown kwargs, then flatten the
        # accepted ones into _params so they can be looked up uniformly.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_quota_maximum_dataset_size" % _key
                )
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        _path_params = {}

        # process the query parameters
        _query_params = []

        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))

        # process the form parameters
        _form_params = []
        _files = {}

        # process the body parameter (GET request — no body)
        _body_params = None

        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # map HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/quota', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 7,024 | 38.466292 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/samples_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, StrictBool, StrictStr, conint, constr, validator
from typing import List, Optional
from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse
from lightly.openapi_generated.swagger_client.models.create_sample_with_write_urls_response import CreateSampleWithWriteUrlsResponse
from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest
from lightly.openapi_generated.swagger_client.models.sample_data import SampleData
from lightly.openapi_generated.swagger_client.models.sample_data_modes import SampleDataModes
from lightly.openapi_generated.swagger_client.models.sample_partial_mode import SamplePartialMode
from lightly.openapi_generated.swagger_client.models.sample_sort_by import SampleSortBy
from lightly.openapi_generated.swagger_client.models.sample_update_request import SampleUpdateRequest
from lightly.openapi_generated.swagger_client.models.sample_write_urls import SampleWriteUrls
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class SamplesApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def create_sample_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_create_request : SampleCreateRequest, **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_sample_by_dataset_id # noqa: E501
Create a new sample in a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_sample_by_dataset_id(dataset_id, sample_create_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param sample_create_request: (required)
:type sample_create_request: SampleCreateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_sample_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_sample_by_dataset_id_with_http_info(dataset_id, sample_create_request, **kwargs) # noqa: E501
    @validate_arguments
    def create_sample_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_create_request : SampleCreateRequest, **kwargs) -> ApiResponse: # noqa: E501
        """create_sample_by_dataset_id # noqa: E501

        Create a new sample in a dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.create_sample_by_dataset_id_with_http_info(dataset_id, sample_create_request, async_req=True)
        >>> result = thread.get()

        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param sample_create_request: (required)
        :type sample_create_request: SampleCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: must remain the first statement — locals() snapshots the named
        # arguments plus the raw **kwargs dict into one mapping.
        _params = locals()

        # Names accepted as keyword arguments for this endpoint.
        _all_params = [
            'dataset_id',
            'sample_create_request'
        ]
        # Generic transport/processing options accepted by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments: reject unknown kwargs, then flatten the
        # accepted ones into _params so they can be looked up uniformly.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_sample_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']

        # process the query parameters
        _query_params = []

        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))

        # process the form parameters
        _form_params = []
        _files = {}

        # process the body parameter: the request model is sent as JSON body
        _body_params = None
        if _params['sample_create_request'] is not None:
            _body_params = _params['sample_create_request']

        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # set the HTTP header `Content-Type` (may be forced via _content_type)
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
                _header_params['Content-Type'] = _content_types_list

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # map HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/samples', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def create_sample_with_write_urls_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_create_request : SampleCreateRequest, **kwargs) -> CreateSampleWithWriteUrlsResponse: # noqa: E501
"""create_sample_with_write_urls_by_dataset_id # noqa: E501
Create a sample and immediately receive write URLs (full image and thumbnail) to upload images # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_sample_with_write_urls_by_dataset_id(dataset_id, sample_create_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param sample_create_request: (required)
:type sample_create_request: SampleCreateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateSampleWithWriteUrlsResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_sample_with_write_urls_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_sample_with_write_urls_by_dataset_id_with_http_info(dataset_id, sample_create_request, **kwargs) # noqa: E501
    @validate_arguments
    def create_sample_with_write_urls_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_create_request : SampleCreateRequest, **kwargs) -> ApiResponse: # noqa: E501
        """create_sample_with_write_urls_by_dataset_id # noqa: E501

        Create a sample and immediately receive write URLs (full image and thumbnail) to upload images # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True

        >>> thread = api.create_sample_with_write_urls_by_dataset_id_with_http_info(dataset_id, sample_create_request, async_req=True)
        >>> result = thread.get()

        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param sample_create_request: (required)
        :type sample_create_request: SampleCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateSampleWithWriteUrlsResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: must remain the first statement — locals() snapshots the named
        # arguments plus the raw **kwargs dict into one mapping.
        _params = locals()

        # Names accepted as keyword arguments for this endpoint.
        _all_params = [
            'dataset_id',
            'sample_create_request'
        ]
        # Generic transport/processing options accepted by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )

        # validate the arguments: reject unknown kwargs, then flatten the
        # accepted ones into _params so they can be looked up uniformly.
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_sample_with_write_urls_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']

        _collection_formats = {}

        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']

        # process the query parameters
        _query_params = []

        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))

        # process the form parameters
        _form_params = []
        _files = {}

        # process the body parameter: the request model is sent as JSON body
        _body_params = None
        if _params['sample_create_request'] is not None:
            _body_params = _params['sample_create_request']

        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # set the HTTP header `Content-Type` (may be forced via _content_type)
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
                _header_params['Content-Type'] = _content_types_list

        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

        # map HTTP status codes to the model used to deserialize the body
        _response_types_map = {
            '201': "CreateSampleWithWriteUrlsResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }

        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/samples/withWriteUrls', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_sample_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], **kwargs) -> SampleData: # noqa: E501
"""get_sample_by_id # noqa: E501
Get a specific sample of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sample_by_id(dataset_id, sample_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param sample_id: ObjectId of the sample (required)
:type sample_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: SampleData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_sample_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_sample_by_id_with_http_info(dataset_id, sample_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_sample_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], **kwargs) -> ApiResponse: # noqa: E501
        """get_sample_by_id # noqa: E501
        Get a specific sample of a dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_sample_by_id_with_http_info(dataset_id, sample_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param sample_id: ObjectId of the sample (required)
        :type sample_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(SampleData, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is captured first, so _params holds exactly the validated
        # parameters plus the raw ``kwargs`` dict (removed again below).
        _params = locals()
        _all_params = [
            'dataset_id',
            'sample_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_sample_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        # NOTE: truthiness check — an empty-string id is silently skipped.
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['sample_id']:
            _path_params['sampleId'] = _params['sample_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # maps HTTP status code -> type name used for deserialization
        _response_types_map = {
            '200': "SampleData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/samples/{sampleId}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_sample_image_read_url_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], type : Annotated[Optional[StrictStr], Field(description="if we want to get the full image or just the thumbnail")] = None, **kwargs) -> str: # noqa: E501
"""get_sample_image_read_url_by_id # noqa: E501
Get the image path of a specific sample of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sample_image_read_url_by_id(dataset_id, sample_id, type, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param sample_id: ObjectId of the sample (required)
:type sample_id: str
:param type: if we want to get the full image or just the thumbnail
:type type: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_sample_image_read_url_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_sample_image_read_url_by_id_with_http_info(dataset_id, sample_id, type, **kwargs) # noqa: E501
    @validate_arguments
    def get_sample_image_read_url_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], type : Annotated[Optional[StrictStr], Field(description="if we want to get the full image or just the thumbnail")] = None, **kwargs) -> ApiResponse: # noqa: E501
        """get_sample_image_read_url_by_id # noqa: E501
        Get the image path of a specific sample of a dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_sample_image_read_url_by_id_with_http_info(dataset_id, sample_id, type, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param sample_id: ObjectId of the sample (required)
        :type sample_id: str
        :param type: if we want to get the full image or just the thumbnail
        :type type: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is captured first, so _params holds exactly the validated
        # parameters plus the raw ``kwargs`` dict (removed again below).
        _params = locals()
        _all_params = [
            'dataset_id',
            'sample_id',
            'type'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_sample_image_read_url_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        # NOTE: truthiness check — an empty-string id is silently skipped.
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['sample_id']:
            _path_params['sampleId'] = _params['sample_id']
        # process the query parameters
        _query_params = []
        if _params.get('type') is not None:  # noqa: E501
            # enums are sent by their .value; plain strings pass through
            _query_params.append((
                'type',
                _params['type'].value if hasattr(_params['type'], 'value') else _params['type']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # maps HTTP status code -> type name used for deserialization
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/samples/{sampleId}/readurl', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_sample_image_resource_redirect_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], type : Annotated[StrictStr, Field(..., description="if we want to get the full image or just the thumbnail")], **kwargs) -> None: # noqa: E501
"""get_sample_image_resource_redirect_by_id # noqa: E501
This endpoint enables anyone given the correct credentials to access the actual image directly. By creating a readURL for the resource and redirecting to that URL, the client can use this endpoint to always have a way to access the resource as there is no expiration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sample_image_resource_redirect_by_id(dataset_id, sample_id, type, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param sample_id: ObjectId of the sample (required)
:type sample_id: str
:param type: if we want to get the full image or just the thumbnail (required)
:type type: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_sample_image_resource_redirect_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_sample_image_resource_redirect_by_id_with_http_info(dataset_id, sample_id, type, **kwargs) # noqa: E501
    @validate_arguments
    def get_sample_image_resource_redirect_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], type : Annotated[StrictStr, Field(..., description="if we want to get the full image or just the thumbnail")], **kwargs) -> ApiResponse: # noqa: E501
        """get_sample_image_resource_redirect_by_id # noqa: E501
        This endpoint enables anyone given the correct credentials to access the actual image directly. By creating a readURL for the resource and redirecting to that URL, the client can use this endpoint to always have a way to access the resource as there is no expiration # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_sample_image_resource_redirect_by_id_with_http_info(dataset_id, sample_id, type, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param sample_id: ObjectId of the sample (required)
        :type sample_id: str
        :param type: if we want to get the full image or just the thumbnail (required)
        :type type: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # locals() is captured first, so _params holds exactly the validated
        # parameters plus the raw ``kwargs`` dict (removed again below).
        _params = locals()
        _all_params = [
            'dataset_id',
            'sample_id',
            'type'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_sample_image_resource_redirect_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        # NOTE: truthiness check — an empty-string id is silently skipped.
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['sample_id']:
            _path_params['sampleId'] = _params['sample_id']
        # process the query parameters
        _query_params = []
        if _params.get('type') is not None:  # noqa: E501
            # enums are sent by their .value; plain strings pass through
            _query_params.append((
                'type',
                _params['type'].value if hasattr(_params['type'], 'value') else _params['type']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting — this endpoint uses the public JWT scheme,
        # unlike the other sample endpoints in this class.
        _auth_settings = ['ApiPublicJWTAuth'] # noqa: E501
        # empty map: the redirect response carries no body to deserialize
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/samples/{sampleId}/readurlRedirect', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_sample_image_write_url_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], is_thumbnail : Annotated[StrictBool, Field(..., description="Whether or not the image to upload is a thumbnail")], **kwargs) -> str: # noqa: E501
"""get_sample_image_write_url_by_id # noqa: E501
Get the signed url to upload an image to for a specific sample of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sample_image_write_url_by_id(dataset_id, sample_id, is_thumbnail, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param sample_id: ObjectId of the sample (required)
:type sample_id: str
:param is_thumbnail: Whether or not the image to upload is a thumbnail (required)
:type is_thumbnail: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_sample_image_write_url_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_sample_image_write_url_by_id_with_http_info(dataset_id, sample_id, is_thumbnail, **kwargs) # noqa: E501
    @validate_arguments
    def get_sample_image_write_url_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], is_thumbnail : Annotated[StrictBool, Field(..., description="Whether or not the image to upload is a thumbnail")], **kwargs) -> ApiResponse: # noqa: E501
        """get_sample_image_write_url_by_id # noqa: E501
        Get the signed url to upload an image to for a specific sample of a dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_sample_image_write_url_by_id_with_http_info(dataset_id, sample_id, is_thumbnail, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param sample_id: ObjectId of the sample (required)
        :type sample_id: str
        :param is_thumbnail: Whether or not the image to upload is a thumbnail (required)
        :type is_thumbnail: bool
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is captured first, so _params holds exactly the validated
        # parameters plus the raw ``kwargs`` dict (removed again below).
        _params = locals()
        _all_params = [
            'dataset_id',
            'sample_id',
            'is_thumbnail'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_sample_image_write_url_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        # NOTE: truthiness check — an empty-string id is silently skipped.
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['sample_id']:
            _path_params['sampleId'] = _params['sample_id']
        # process the query parameters
        # NOTE: ``is not None`` (not truthiness), so isThumbnail=False is sent.
        _query_params = []
        if _params.get('is_thumbnail') is not None:  # noqa: E501
            _query_params.append((
                'isThumbnail',
                _params['is_thumbnail'].value if hasattr(_params['is_thumbnail'], 'value') else _params['is_thumbnail']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # maps HTTP status code -> type name used for deserialization
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/samples/{sampleId}/writeurl', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_sample_image_write_urls_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], **kwargs) -> SampleWriteUrls: # noqa: E501
"""get_sample_image_write_urls_by_id # noqa: E501
Get all signed write URLs to upload all images (full image and thumbnail) of a specific sample of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_sample_image_write_urls_by_id(dataset_id, sample_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param sample_id: ObjectId of the sample (required)
:type sample_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: SampleWriteUrls
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_sample_image_write_urls_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_sample_image_write_urls_by_id_with_http_info(dataset_id, sample_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_sample_image_write_urls_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], **kwargs) -> ApiResponse: # noqa: E501
        """get_sample_image_write_urls_by_id # noqa: E501
        Get all signed write URLs to upload all images (full image and thumbnail) of a specific sample of a dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_sample_image_write_urls_by_id_with_http_info(dataset_id, sample_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param sample_id: ObjectId of the sample (required)
        :type sample_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(SampleWriteUrls, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is captured first, so _params holds exactly the validated
        # parameters plus the raw ``kwargs`` dict (removed again below).
        _params = locals()
        _all_params = [
            'dataset_id',
            'sample_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_sample_image_write_urls_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        # NOTE: truthiness check — an empty-string id is silently skipped.
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['sample_id']:
            _path_params['sampleId'] = _params['sample_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # maps HTTP status code -> type name used for deserialization
        _response_types_map = {
            '200': "SampleWriteUrls",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/samples/{sampleId}/writeurls', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_samples_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[Optional[StrictStr], Field(description="filter the samples by filename")] = None, sort_by : Annotated[Optional[SampleSortBy], Field(description="sort the samples")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[SampleData]: # noqa: E501
"""get_samples_by_dataset_id # noqa: E501
Get all samples of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_samples_by_dataset_id(dataset_id, file_name, sort_by, page_size, page_offset, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param file_name: filter the samples by filename
:type file_name: str
:param sort_by: sort the samples
:type sort_by: SampleSortBy
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[SampleData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_samples_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_samples_by_dataset_id_with_http_info(dataset_id, file_name, sort_by, page_size, page_offset, **kwargs) # noqa: E501
    @validate_arguments
    def get_samples_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], file_name : Annotated[Optional[StrictStr], Field(description="filter the samples by filename")] = None, sort_by : Annotated[Optional[SampleSortBy], Field(description="sort the samples")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse: # noqa: E501
        """get_samples_by_dataset_id # noqa: E501
        Get all samples of a dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_samples_by_dataset_id_with_http_info(dataset_id, file_name, sort_by, page_size, page_offset, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param file_name: filter the samples by filename
        :type file_name: str
        :param sort_by: sort the samples
        :type sort_by: SampleSortBy
        :param page_size: pagination size/limit of the number of samples to return
        :type page_size: int
        :param page_offset: pagination offset
        :type page_offset: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[SampleData], status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is taken first on purpose: it snapshots self, every
        # declared parameter and the raw 'kwargs' dict under their exact
        # names, which the lookups below rely on.
        _params = locals()
        _all_params = [
            'dataset_id',
            'file_name',
            'sort_by',
            'page_size',
            'page_offset'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject anything not declared above,
        # then flatten accepted kwargs into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_samples_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters (enum members are sent as their .value)
        _query_params = []
        if _params.get('file_name') is not None: # noqa: E501
            _query_params.append((
                'fileName',
                _params['file_name'].value if hasattr(_params['file_name'], 'value') else _params['file_name']
            ))
        if _params.get('sort_by') is not None: # noqa: E501
            _query_params.append((
                'sortBy',
                _params['sort_by'].value if hasattr(_params['sort_by'], 'value') else _params['sort_by']
            ))
        if _params.get('page_size') is not None: # noqa: E501
            _query_params.append((
                'pageSize',
                _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
            ))
        if _params.get('page_offset') is not None: # noqa: E501
            _query_params.append((
                'pageOffset',
                _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
            ))
        # process the header parameters (caller-supplied _headers win below
        # only for keys other than Accept, which is forced to JSON)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # map HTTP status codes to the model used for deserialization
        _response_types_map = {
            '200': "List[SampleData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/samples', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_samples_partial_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], mode : Optional[SamplePartialMode] = None, file_name : Annotated[Optional[StrictStr], Field(description="filter the samples by filename")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[SampleDataModes]: # noqa: E501
"""get_samples_partial_by_dataset_id # noqa: E501
Get partial information of all samples of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_samples_partial_by_dataset_id(dataset_id, mode, file_name, page_size, page_offset, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param mode:
:type mode: SamplePartialMode
:param file_name: filter the samples by filename
:type file_name: str
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[SampleDataModes]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_samples_partial_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_samples_partial_by_dataset_id_with_http_info(dataset_id, mode, file_name, page_size, page_offset, **kwargs) # noqa: E501
    @validate_arguments
    def get_samples_partial_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], mode : Optional[SamplePartialMode] = None, file_name : Annotated[Optional[StrictStr], Field(description="filter the samples by filename")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse: # noqa: E501
        """get_samples_partial_by_dataset_id # noqa: E501
        Get partial information of all samples of a dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_samples_partial_by_dataset_id_with_http_info(dataset_id, mode, file_name, page_size, page_offset, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param mode:
        :type mode: SamplePartialMode
        :param file_name: filter the samples by filename
        :type file_name: str
        :param page_size: pagination size/limit of the number of samples to return
        :type page_size: int
        :param page_offset: pagination offset
        :type page_offset: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[SampleDataModes], status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is taken first on purpose: it snapshots self, every
        # declared parameter and the raw 'kwargs' dict under their exact
        # names, which the lookups below rely on.
        _params = locals()
        _all_params = [
            'dataset_id',
            'mode',
            'file_name',
            'page_size',
            'page_offset'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject anything not declared above,
        # then flatten accepted kwargs into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_samples_partial_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters (enum members are sent as their .value)
        _query_params = []
        if _params.get('mode') is not None: # noqa: E501
            _query_params.append((
                'mode',
                _params['mode'].value if hasattr(_params['mode'], 'value') else _params['mode']
            ))
        if _params.get('file_name') is not None: # noqa: E501
            _query_params.append((
                'fileName',
                _params['file_name'].value if hasattr(_params['file_name'], 'value') else _params['file_name']
            ))
        if _params.get('page_size') is not None: # noqa: E501
            _query_params.append((
                'pageSize',
                _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
            ))
        if _params.get('page_offset') is not None: # noqa: E501
            _query_params.append((
                'pageOffset',
                _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
            ))
        # process the header parameters (Accept is forced to JSON below)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # map HTTP status codes to the model used for deserialization
        _response_types_map = {
            '200': "List[SampleDataModes]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/samples/partial', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def update_sample_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], sample_update_request : Annotated[SampleUpdateRequest, Field(..., description="The updated sample to set")], enable_dataset_update : Optional[StrictBool] = None, **kwargs) -> None: # noqa: E501
"""update_sample_by_id # noqa: E501
update a specific sample of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_sample_by_id(dataset_id, sample_id, sample_update_request, enable_dataset_update, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param sample_id: ObjectId of the sample (required)
:type sample_id: str
:param sample_update_request: The updated sample to set (required)
:type sample_update_request: SampleUpdateRequest
:param enable_dataset_update:
:type enable_dataset_update: bool
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the update_sample_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.update_sample_by_id_with_http_info(dataset_id, sample_id, sample_update_request, enable_dataset_update, **kwargs) # noqa: E501
    @validate_arguments
    def update_sample_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], sample_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the sample")], sample_update_request : Annotated[SampleUpdateRequest, Field(..., description="The updated sample to set")], enable_dataset_update : Optional[StrictBool] = None, **kwargs) -> ApiResponse: # noqa: E501
        """update_sample_by_id # noqa: E501
        update a specific sample of a dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_sample_by_id_with_http_info(dataset_id, sample_id, sample_update_request, enable_dataset_update, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param sample_id: ObjectId of the sample (required)
        :type sample_id: str
        :param sample_update_request: The updated sample to set (required)
        :type sample_update_request: SampleUpdateRequest
        :param enable_dataset_update:
        :type enable_dataset_update: bool
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # locals() is taken first on purpose: it snapshots self, every
        # declared parameter and the raw 'kwargs' dict under their exact
        # names, which the lookups below rely on.
        _params = locals()
        _all_params = [
            'dataset_id',
            'sample_id',
            'sample_update_request',
            'enable_dataset_update'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject anything not declared above,
        # then flatten accepted kwargs into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_sample_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['sample_id']:
            _path_params['sampleId'] = _params['sample_id']
        # process the query parameters (enum members are sent as their .value)
        _query_params = []
        if _params.get('enable_dataset_update') is not None: # noqa: E501
            _query_params.append((
                'enableDatasetUpdate',
                _params['enable_dataset_update'].value if hasattr(_params['enable_dataset_update'], 'value') else _params['enable_dataset_update']
            ))
        # process the header parameters (Accept/Content-Type are set below)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter: the update request becomes the JSON body
        _body_params = None
        if _params['sample_update_request'] is not None:
            _body_params = _params['sample_update_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type` (caller may force one via _content_type)
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # empty map: the endpoint returns no body to deserialize
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/samples/{sampleId}', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 80,646 | 46.467334 | 640 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/samplings_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, constr, validator
from lightly.openapi_generated.swagger_client.models.async_task_data import AsyncTaskData
from lightly.openapi_generated.swagger_client.models.sampling_create_request import SamplingCreateRequest
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class SamplingsApi(object):
    """API wrapper for the sampling endpoints.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    def __init__(self, api_client=None):
        # fall back to the process-wide default client when none is supplied
        if api_client is None:
            api_client = ApiClient.get_default()
        self.api_client = api_client
    @validate_arguments
    def trigger_sampling_by_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], sampling_create_request : SamplingCreateRequest, **kwargs) -> AsyncTaskData: # noqa: E501
        """trigger_sampling_by_id # noqa: E501
        Trigger a sampling on a specific tag of a dataset with specific prior uploaded csv embedding # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.trigger_sampling_by_id(dataset_id, embedding_id, sampling_create_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param embedding_id: ObjectId of the embedding (required)
        :type embedding_id: str
        :param sampling_create_request: (required)
        :type sampling_create_request: SamplingCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: AsyncTaskData
        """
        # This wrapper always strips the HTTP envelope from the response.
        kwargs['_return_http_data_only'] = True
        if '_preload_content' in kwargs:
            raise ValueError("Error! Please call the trigger_sampling_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
        return self.trigger_sampling_by_id_with_http_info(dataset_id, embedding_id, sampling_create_request, **kwargs) # noqa: E501
    @validate_arguments
    def trigger_sampling_by_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], embedding_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the embedding")], sampling_create_request : SamplingCreateRequest, **kwargs) -> ApiResponse: # noqa: E501
        """trigger_sampling_by_id # noqa: E501
        Trigger a sampling on a specific tag of a dataset with specific prior uploaded csv embedding # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.trigger_sampling_by_id_with_http_info(dataset_id, embedding_id, sampling_create_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param embedding_id: ObjectId of the embedding (required)
        :type embedding_id: str
        :param sampling_create_request: (required)
        :type sampling_create_request: SamplingCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(AsyncTaskData, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() is taken first on purpose: it snapshots self, every
        # declared parameter and the raw 'kwargs' dict under their exact
        # names, which the lookups below rely on.
        _params = locals()
        _all_params = [
            'dataset_id',
            'embedding_id',
            'sampling_create_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject anything not declared above,
        # then flatten accepted kwargs into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method trigger_sampling_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['embedding_id']:
            _path_params['embeddingId'] = _params['embedding_id']
        # process the query parameters (none for this endpoint)
        _query_params = []
        # process the header parameters (Accept/Content-Type are set below)
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter: the create request becomes the JSON body
        _body_params = None
        if _params['sampling_create_request'] is not None:
            _body_params = _params['sampling_create_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type` (caller may force one via _content_type)
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # map HTTP status codes to the model used for deserialization
        _response_types_map = {
            '200': "AsyncTaskData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/embeddings/{embeddingId}/sampling', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 9,319 | 42.755869 | 336 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/scores_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, constr, validator
from typing import List
from lightly.openapi_generated.swagger_client.models.active_learning_score_create_request import ActiveLearningScoreCreateRequest
from lightly.openapi_generated.swagger_client.models.active_learning_score_data import ActiveLearningScoreData
from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse
from lightly.openapi_generated.swagger_client.models.tag_active_learning_scores_data import TagActiveLearningScoresData
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ScoresApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def create_or_update_active_learning_score_by_tag_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], active_learning_score_create_request : ActiveLearningScoreCreateRequest, **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_or_update_active_learning_score_by_tag_id # noqa: E501
Create or update active learning score object by tag id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_or_update_active_learning_score_by_tag_id(dataset_id, tag_id, active_learning_score_create_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param active_learning_score_create_request: (required)
:type active_learning_score_create_request: ActiveLearningScoreCreateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_or_update_active_learning_score_by_tag_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_or_update_active_learning_score_by_tag_id_with_http_info(dataset_id, tag_id, active_learning_score_create_request, **kwargs) # noqa: E501
    @validate_arguments
    def create_or_update_active_learning_score_by_tag_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], active_learning_score_create_request : ActiveLearningScoreCreateRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """create_or_update_active_learning_score_by_tag_id # noqa: E501
        Create or update active learning score object by tag id # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_or_update_active_learning_score_by_tag_id_with_http_info(dataset_id, tag_id, active_learning_score_create_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_id: ObjectId of the tag (required)
        :type tag_id: str
        :param active_learning_score_create_request: (required)
        :type active_learning_score_create_request: ActiveLearningScoreCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() is called before any other local variable exists, so
        # _params holds exactly the declared arguments plus the `kwargs` dict.
        _params = locals()
        _all_params = [
            'dataset_id',
            'tag_id',
            'active_learning_score_create_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments; merge accepted kwargs into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_or_update_active_learning_score_by_tag_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['tag_id']:
            _path_params['tagId'] = _params['tag_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['active_learning_score_create_request'] is not None:
            _body_params = _params['active_learning_score_create_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map expected HTTP status codes to the models used for deserialization
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the actual HTTP call to the shared ApiClient
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/{tagId}/scores', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_active_learning_score_by_score_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], score_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the scores")], **kwargs) -> ActiveLearningScoreData: # noqa: E501
"""get_active_learning_score_by_score_id # noqa: E501
Get active learning score object by id # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_active_learning_score_by_score_id(dataset_id, tag_id, score_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param score_id: ObjectId of the scores (required)
:type score_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: ActiveLearningScoreData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_active_learning_score_by_score_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_active_learning_score_by_score_id_with_http_info(dataset_id, tag_id, score_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_active_learning_score_by_score_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], score_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the scores")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_active_learning_score_by_score_id # noqa: E501
        Get active learning score object by id # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_active_learning_score_by_score_id_with_http_info(dataset_id, tag_id, score_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_id: ObjectId of the tag (required)
        :type tag_id: str
        :param score_id: ObjectId of the scores (required)
        :type score_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(ActiveLearningScoreData, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() is called before any other local variable exists, so
        # _params holds exactly the declared arguments plus the `kwargs` dict.
        _params = locals()
        _all_params = [
            'dataset_id',
            'tag_id',
            'score_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments; merge accepted kwargs into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_active_learning_score_by_score_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['tag_id']:
            _path_params['tagId'] = _params['tag_id']
        if _params['score_id']:
            _path_params['scoreId'] = _params['score_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body is sent)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map expected HTTP status codes to the models used for deserialization
        _response_types_map = {
            '200': "ActiveLearningScoreData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the actual HTTP call to the shared ApiClient
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/{tagId}/scores/{scoreId}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_active_learning_scores_by_tag_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> List[TagActiveLearningScoresData]: # noqa: E501
"""get_active_learning_scores_by_tag_id # noqa: E501
Get all scoreIds for the given tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_active_learning_scores_by_tag_id(dataset_id, tag_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[TagActiveLearningScoresData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_active_learning_scores_by_tag_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_active_learning_scores_by_tag_id_with_http_info(dataset_id, tag_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_active_learning_scores_by_tag_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_active_learning_scores_by_tag_id # noqa: E501
        Get all scoreIds for the given tag # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_active_learning_scores_by_tag_id_with_http_info(dataset_id, tag_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_id: ObjectId of the tag (required)
        :type tag_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[TagActiveLearningScoresData], status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() is called before any other local variable exists, so
        # _params holds exactly the declared arguments plus the `kwargs` dict.
        _params = locals()
        _all_params = [
            'dataset_id',
            'tag_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments; merge accepted kwargs into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_active_learning_scores_by_tag_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['tag_id']:
            _path_params['tagId'] = _params['tag_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body is sent)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map expected HTTP status codes to the models used for deserialization
        _response_types_map = {
            '200': "List[TagActiveLearningScoresData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the actual HTTP call to the shared ApiClient
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/{tagId}/scores', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 24,340 | 45.187856 | 383 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/tags_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, StrictBool, StrictInt, StrictStr, conint, constr, validator
from typing import List, Optional, Union
from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse
from lightly.openapi_generated.swagger_client.models.file_name_format import FileNameFormat
from lightly.openapi_generated.swagger_client.models.file_output_format import FileOutputFormat
from lightly.openapi_generated.swagger_client.models.filename_and_read_url import FilenameAndReadUrl
from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest
from lightly.openapi_generated.swagger_client.models.label_box_data_row import LabelBoxDataRow
from lightly.openapi_generated.swagger_client.models.label_box_v4_data_row import LabelBoxV4DataRow
from lightly.openapi_generated.swagger_client.models.label_studio_task import LabelStudioTask
from lightly.openapi_generated.swagger_client.models.sama_task import SamaTask
from lightly.openapi_generated.swagger_client.models.tag_arithmetics_request import TagArithmeticsRequest
from lightly.openapi_generated.swagger_client.models.tag_arithmetics_response import TagArithmeticsResponse
from lightly.openapi_generated.swagger_client.models.tag_bit_mask_response import TagBitMaskResponse
from lightly.openapi_generated.swagger_client.models.tag_create_request import TagCreateRequest
from lightly.openapi_generated.swagger_client.models.tag_data import TagData
from lightly.openapi_generated.swagger_client.models.tag_update_request import TagUpdateRequest
from lightly.openapi_generated.swagger_client.models.tag_upsize_request import TagUpsizeRequest
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class TagsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def create_initial_tag_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], initial_tag_create_request : InitialTagCreateRequest, **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_initial_tag_by_dataset_id # noqa: E501
create the intitial tag for a dataset which then locks the dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_initial_tag_by_dataset_id(dataset_id, initial_tag_create_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param initial_tag_create_request: (required)
:type initial_tag_create_request: InitialTagCreateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_initial_tag_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_initial_tag_by_dataset_id_with_http_info(dataset_id, initial_tag_create_request, **kwargs) # noqa: E501
    @validate_arguments
    def create_initial_tag_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], initial_tag_create_request : InitialTagCreateRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """create_initial_tag_by_dataset_id # noqa: E501
        create the intitial tag for a dataset which then locks the dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_initial_tag_by_dataset_id_with_http_info(dataset_id, initial_tag_create_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param initial_tag_create_request: (required)
        :type initial_tag_create_request: InitialTagCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() is called before any other local variable exists, so
        # _params holds exactly the declared arguments plus the `kwargs` dict.
        _params = locals()
        _all_params = [
            'dataset_id',
            'initial_tag_create_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments; merge accepted kwargs into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_initial_tag_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['initial_tag_create_request'] is not None:
            _body_params = _params['initial_tag_create_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map expected HTTP status codes to the models used for deserialization
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the actual HTTP call to the shared ApiClient
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/initial', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def create_tag_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_create_request : TagCreateRequest, **kwargs) -> CreateEntityResponse: # noqa: E501
"""create_tag_by_dataset_id # noqa: E501
create new tag for dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_tag_by_dataset_id(dataset_id, tag_create_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_create_request: (required)
:type tag_create_request: TagCreateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the create_tag_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.create_tag_by_dataset_id_with_http_info(dataset_id, tag_create_request, **kwargs) # noqa: E501
    @validate_arguments
    def create_tag_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_create_request : TagCreateRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """create_tag_by_dataset_id # noqa: E501
        create new tag for dataset # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.create_tag_by_dataset_id_with_http_info(dataset_id, tag_create_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_create_request: (required)
        :type tag_create_request: TagCreateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() is called before any other local variable exists, so
        # _params holds exactly the declared arguments plus the `kwargs` dict.
        _params = locals()
        _all_params = [
            'dataset_id',
            'tag_create_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments; merge accepted kwargs into _params
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_tag_by_dataset_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['tag_create_request'] is not None:
            _body_params = _params['tag_create_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map expected HTTP status codes to the models used for deserialization
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the actual HTTP call to the shared ApiClient
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def delete_tag_by_tag_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> None: # noqa: E501
"""delete_tag_by_tag_id # noqa: E501
delete a specific tag if its a leaf-tag (e.g is not a dependency of another tag) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_tag_by_tag_id(dataset_id, tag_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the delete_tag_by_tag_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.delete_tag_by_tag_id_with_http_info(dataset_id, tag_id, **kwargs) # noqa: E501
    @validate_arguments
    def delete_tag_by_tag_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> ApiResponse: # noqa: E501
        """delete_tag_by_tag_id # noqa: E501
        delete a specific tag if its a leaf-tag (e.g is not a dependency of another tag) # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_tag_by_tag_id_with_http_info(dataset_id, tag_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_id: ObjectId of the tag (required)
        :type tag_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # NOTE: locals() must run first so it captures exactly the declared
        # parameters plus the raw `kwargs` dict and nothing else; do not
        # introduce new locals before this line.
        _params = locals()
        # Keyword arguments accepted by this endpoint (positional names plus
        # the framework-level passthrough options appended below).
        _all_params = [
            'dataset_id',
            'tag_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject anything not in the whitelist, then
        # merge the accepted kwargs into the flat _params dict
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_tag_by_tag_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['tag_id']:
            _path_params['tagId'] = _params['tag_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # empty map: a successful DELETE carries no body to deserialize
        _response_types_map = {}
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/{tagId}', 'DELETE',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def download_zip_of_samples_by_tag_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> bytearray: # noqa: E501
"""download_zip_of_samples_by_tag_id # noqa: E501
Download a zip file of the samples of a tag. Limited to 1000 images # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.download_zip_of_samples_by_tag_id(dataset_id, tag_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: bytearray
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the download_zip_of_samples_by_tag_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.download_zip_of_samples_by_tag_id_with_http_info(dataset_id, tag_id, **kwargs) # noqa: E501
    @validate_arguments
    def download_zip_of_samples_by_tag_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> ApiResponse: # noqa: E501
        """download_zip_of_samples_by_tag_id # noqa: E501
        Download a zip file of the samples of a tag. Limited to 1000 images # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.download_zip_of_samples_by_tag_id_with_http_info(dataset_id, tag_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_id: ObjectId of the tag (required)
        :type tag_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(bytearray, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must run first so it captures exactly the declared
        # parameters plus the raw `kwargs` dict and nothing else; do not
        # introduce new locals before this line.
        _params = locals()
        # Keyword arguments accepted by this endpoint (positional names plus
        # the framework-level passthrough options appended below).
        _all_params = [
            'dataset_id',
            'tag_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject anything not in the whitelist, then
        # merge the accepted kwargs into the flat _params dict
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method download_zip_of_samples_by_tag_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['tag_id']:
            _path_params['tagId'] = _params['tag_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/zip', 'application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # status-code -> type mapping used by the client to deserialize the body
        _response_types_map = {
            '200': "bytearray",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
            '413': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/{tagId}/export/zip', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def export_tag_to_basic_filenames(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> str: # noqa: E501
"""export_tag_to_basic_filenames # noqa: E501
Export the samples filenames of a specific tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_tag_to_basic_filenames(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param expires_in: If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely.
:type expires_in: int
:param access_control: which access control name to be used
:type access_control: str
:param file_name_format:
:type file_name_format: FileNameFormat
:param include_meta_data: if true, will also include metadata
:type include_meta_data: bool
:param format:
:type format: FileOutputFormat
:param preview_example: if true, will generate a preview example of how the structure will look
:type preview_example: bool
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: str
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the export_tag_to_basic_filenames_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.export_tag_to_basic_filenames_with_http_info(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, **kwargs) # noqa: E501
    @validate_arguments
    def export_tag_to_basic_filenames_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse: # noqa: E501
        """export_tag_to_basic_filenames # noqa: E501
        Export the samples filenames of a specific tag # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.export_tag_to_basic_filenames_with_http_info(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_id: ObjectId of the tag (required)
        :type tag_id: str
        :param expires_in: If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely.
        :type expires_in: int
        :param access_control: which access control name to be used
        :type access_control: str
        :param file_name_format:
        :type file_name_format: FileNameFormat
        :param include_meta_data: if true, will also include metadata
        :type include_meta_data: bool
        :param format:
        :type format: FileOutputFormat
        :param preview_example: if true, will generate a preview example of how the structure will look
        :type preview_example: bool
        :param page_size: pagination size/limit of the number of samples to return
        :type page_size: int
        :param page_offset: pagination offset
        :type page_offset: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must run first so it captures exactly the declared
        # parameters plus the raw `kwargs` dict and nothing else; do not
        # introduce new locals before this line.
        _params = locals()
        # Keyword arguments accepted by this endpoint (positional names plus
        # the framework-level passthrough options appended below).
        _all_params = [
            'dataset_id',
            'tag_id',
            'expires_in',
            'access_control',
            'file_name_format',
            'include_meta_data',
            'format',
            'preview_example',
            'page_size',
            'page_offset'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject anything not in the whitelist, then
        # merge the accepted kwargs into the flat _params dict
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method export_tag_to_basic_filenames" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['tag_id']:
            _path_params['tagId'] = _params['tag_id']
        # process the query parameters; the `.value` unwrap below serializes
        # enum members (e.g. FileOutputFormat) by their raw value
        _query_params = []
        if _params.get('expires_in') is not None:  # noqa: E501
            _query_params.append((
                'expiresIn',
                _params['expires_in'].value if hasattr(_params['expires_in'], 'value') else _params['expires_in']
            ))
        if _params.get('access_control') is not None:  # noqa: E501
            _query_params.append((
                'accessControl',
                _params['access_control'].value if hasattr(_params['access_control'], 'value') else _params['access_control']
            ))
        if _params.get('file_name_format') is not None:  # noqa: E501
            _query_params.append((
                'fileNameFormat',
                _params['file_name_format'].value if hasattr(_params['file_name_format'], 'value') else _params['file_name_format']
            ))
        if _params.get('include_meta_data') is not None:  # noqa: E501
            _query_params.append((
                'includeMetaData',
                _params['include_meta_data'].value if hasattr(_params['include_meta_data'], 'value') else _params['include_meta_data']
            ))
        if _params.get('format') is not None:  # noqa: E501
            _query_params.append((
                'format',
                _params['format'].value if hasattr(_params['format'], 'value') else _params['format']
            ))
        if _params.get('preview_example') is not None:  # noqa: E501
            _query_params.append((
                'previewExample',
                _params['preview_example'].value if hasattr(_params['preview_example'], 'value') else _params['preview_example']
            ))
        if _params.get('page_size') is not None:  # noqa: E501
            _query_params.append((
                'pageSize',
                _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
            ))
        if _params.get('page_offset') is not None:  # noqa: E501
            _query_params.append((
                'pageOffset',
                _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['text/plain', 'application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # status-code -> type mapping used by the client to deserialize the body
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/{tagId}/export/basic/filenames', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def export_tag_to_basic_filenames_and_read_urls(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[FilenameAndReadUrl]: # noqa: E501
"""export_tag_to_basic_filenames_and_read_urls # noqa: E501
Export the samples filenames to map with their readURL. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_tag_to_basic_filenames_and_read_urls(dataset_id, tag_id, format, preview_example, page_size, page_offset, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param format:
:type format: FileOutputFormat
:param preview_example: if true, will generate a preview example of how the structure will look
:type preview_example: bool
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[FilenameAndReadUrl]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the export_tag_to_basic_filenames_and_read_urls_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.export_tag_to_basic_filenames_and_read_urls_with_http_info(dataset_id, tag_id, format, preview_example, page_size, page_offset, **kwargs) # noqa: E501
    @validate_arguments
    def export_tag_to_basic_filenames_and_read_urls_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse: # noqa: E501
        """export_tag_to_basic_filenames_and_read_urls # noqa: E501
        Export the samples filenames to map with their readURL. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.export_tag_to_basic_filenames_and_read_urls_with_http_info(dataset_id, tag_id, format, preview_example, page_size, page_offset, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_id: ObjectId of the tag (required)
        :type tag_id: str
        :param format:
        :type format: FileOutputFormat
        :param preview_example: if true, will generate a preview example of how the structure will look
        :type preview_example: bool
        :param page_size: pagination size/limit of the number of samples to return
        :type page_size: int
        :param page_offset: pagination offset
        :type page_offset: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[FilenameAndReadUrl], status_code(int), headers(HTTPHeaderDict))
        """
        # NOTE: locals() must run first so it captures exactly the declared
        # parameters plus the raw `kwargs` dict and nothing else; do not
        # introduce new locals before this line.
        _params = locals()
        # Keyword arguments accepted by this endpoint (positional names plus
        # the framework-level passthrough options appended below).
        _all_params = [
            'dataset_id',
            'tag_id',
            'format',
            'preview_example',
            'page_size',
            'page_offset'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject anything not in the whitelist, then
        # merge the accepted kwargs into the flat _params dict
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method export_tag_to_basic_filenames_and_read_urls" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['tag_id']:
            _path_params['tagId'] = _params['tag_id']
        # process the query parameters; the `.value` unwrap below serializes
        # enum members (e.g. FileOutputFormat) by their raw value
        _query_params = []
        if _params.get('format') is not None:  # noqa: E501
            _query_params.append((
                'format',
                _params['format'].value if hasattr(_params['format'], 'value') else _params['format']
            ))
        if _params.get('preview_example') is not None:  # noqa: E501
            _query_params.append((
                'previewExample',
                _params['preview_example'].value if hasattr(_params['preview_example'], 'value') else _params['preview_example']
            ))
        if _params.get('page_size') is not None:  # noqa: E501
            _query_params.append((
                'pageSize',
                _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
            ))
        if _params.get('page_offset') is not None:  # noqa: E501
            _query_params.append((
                'pageOffset',
                _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # status-code -> type mapping used by the client to deserialize the body
        _response_types_map = {
            '200': "List[FilenameAndReadUrl]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/{tagId}/export/basic/filenamesAndReadUrls', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def export_tag_to_label_box_data_rows(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[LabelBoxDataRow]: # noqa: E501
"""(Deprecated) export_tag_to_label_box_data_rows # noqa: E501
Deprecated. Please use V4 unless there is a specific need to use the LabelBox V3 API. Export samples of a tag as a json for importing into LabelBox as outlined here; https://docs.labelbox.com/v3/reference/image ```openapi\\+warning The image URLs are special in that the resource can be accessed by anyone in possession of said URL for the time specified by the expiresIn query param ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_tag_to_label_box_data_rows(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param expires_in: If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely.
:type expires_in: int
:param access_control: which access control name to be used
:type access_control: str
:param file_name_format:
:type file_name_format: FileNameFormat
:param include_meta_data: if true, will also include metadata
:type include_meta_data: bool
:param format:
:type format: FileOutputFormat
:param preview_example: if true, will generate a preview example of how the structure will look
:type preview_example: bool
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[LabelBoxDataRow]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the export_tag_to_label_box_data_rows_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.export_tag_to_label_box_data_rows_with_http_info(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, **kwargs) # noqa: E501
@validate_arguments
def export_tag_to_label_box_data_rows_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse:  # noqa: E501
    """(Deprecated) export_tag_to_label_box_data_rows  # noqa: E501

    Deprecated. Please use V4 unless there is a specific need to use the LabelBox V3 API. Export samples of a tag as a json for importing into LabelBox as outlined here; https://docs.labelbox.com/v3/reference/image ```openapi\\+warning The image URLs are special in that the resource can be accessed by anyone in possession of said URL for the time specified by the expiresIn query param ```  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.export_tag_to_label_box_data_rows_with_http_info(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param tag_id: ObjectId of the tag (required)
    :type tag_id: str
    :param expires_in: If defined, the URLs provided will only be valid for this amount of seconds from time of issuance. If not defined, the URLs will be valid indefinitely.
    :type expires_in: int
    :param access_control: which access control name to be used
    :type access_control: str
    :param file_name_format:
    :type file_name_format: FileNameFormat
    :param include_meta_data: if true, will also include metadata
    :type include_meta_data: bool
    :param format:
    :type format: FileOutputFormat
    :param preview_example: if true, will generate a preview example of how the structure will look
    :type preview_example: bool
    :param page_size: pagination size/limit of the number of samples to return
    :type page_size: int
    :param page_offset: pagination offset
    :type page_offset: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(List[LabelBoxDataRow], status_code(int), headers(HTTPHeaderDict))
    """

    # Endpoint is deprecated server-side; surface that to callers.
    warnings.warn("GET /v1/datasets/{datasetId}/tags/{tagId}/export/LabelBox/datarows is deprecated.", DeprecationWarning)

    # Snapshot of every declared argument (plus the raw 'kwargs' dict) by
    # name; all lookups below go through this mapping. Must stay the first
    # statement so no locals other than the parameters are captured.
    _params = locals()

    _all_params = [
        'dataset_id',
        'tag_id',
        'expires_in',
        'access_control',
        'file_name_format',
        'include_meta_data',
        'format',
        'preview_example',
        'page_size',
        'page_offset'
    ]
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject any keyword that is neither an endpoint
    # parameter nor a recognized client option, then fold the accepted ones
    # into the flat _params mapping
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method export_tag_to_label_box_data_rows" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters (snake_case args -> camelCase placeholders)
    _path_params = {}
    if _params['dataset_id']:
        _path_params['datasetId'] = _params['dataset_id']

    if _params['tag_id']:
        _path_params['tagId'] = _params['tag_id']

    # process the query parameters; enum members are unwrapped to their raw
    # ``.value`` before serialization
    _query_params = []
    if _params.get('expires_in') is not None:  # noqa: E501
        _query_params.append((
            'expiresIn',
            _params['expires_in'].value if hasattr(_params['expires_in'], 'value') else _params['expires_in']
        ))

    if _params.get('access_control') is not None:  # noqa: E501
        _query_params.append((
            'accessControl',
            _params['access_control'].value if hasattr(_params['access_control'], 'value') else _params['access_control']
        ))

    if _params.get('file_name_format') is not None:  # noqa: E501
        _query_params.append((
            'fileNameFormat',
            _params['file_name_format'].value if hasattr(_params['file_name_format'], 'value') else _params['file_name_format']
        ))

    if _params.get('include_meta_data') is not None:  # noqa: E501
        _query_params.append((
            'includeMetaData',
            _params['include_meta_data'].value if hasattr(_params['include_meta_data'], 'value') else _params['include_meta_data']
        ))

    if _params.get('format') is not None:  # noqa: E501
        _query_params.append((
            'format',
            _params['format'].value if hasattr(_params['format'], 'value') else _params['format']
        ))

    if _params.get('preview_example') is not None:  # noqa: E501
        _query_params.append((
            'previewExample',
            _params['preview_example'].value if hasattr(_params['preview_example'], 'value') else _params['preview_example']
        ))

    if _params.get('page_size') is not None:  # noqa: E501
        _query_params.append((
            'pageSize',
            _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
        ))

    if _params.get('page_offset') is not None:  # noqa: E501
        _query_params.append((
            'pageOffset',
            _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
        ))

    # process the header parameters (caller-supplied via '_headers')
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters (none for this GET endpoint)
    _form_params = []
    _files = {}

    # process the body parameter (GET request: no body)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # map HTTP status codes to the model used for deserializing the response
    _response_types_map = {
        '200': "List[LabelBoxDataRow]",
        '400': "ApiErrorResponse",
        '401': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/tags/{tagId}/export/LabelBox/datarows', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def export_tag_to_label_box_v4_data_rows(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[LabelBoxV4DataRow]:  # noqa: E501
    """export_tag_to_label_box_v4_data_rows  # noqa: E501

    Export the samples of a tag as JSON data rows for import into LabelBox
    (V4 format, see https://docs.labelbox.com/v4/reference/image). The image
    URLs in the export can be accessed by anyone holding them for the time
    given by the ``expiresIn`` query parameter.

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case a thread is returned:

    >>> thread = api.export_tag_to_label_box_v4_data_rows(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param tag_id: ObjectId of the tag (required)
    :type tag_id: str
    :param expires_in: validity of the returned URLs in seconds; URLs never
        expire when omitted
    :type expires_in: int
    :param access_control: which access control name to be used
    :type access_control: str
    :param file_name_format:
    :type file_name_format: FileNameFormat
    :param include_meta_data: if true, will also include metadata
    :type include_meta_data: bool
    :param format:
    :type format: FileOutputFormat
    :param preview_example: if true, generates a preview example of the
        resulting structure
    :type preview_example: bool
    :param page_size: pagination size/limit of the number of samples to return
    :type page_size: int
    :param page_offset: pagination offset
    :type page_offset: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout for this request; a single number is the
        total timeout, a (connection, read) tuple sets both individually
    :raises ValueError: if ``_preload_content`` is passed (use the
        ``*_with_http_info`` variant for raw responses)
    :return: the deserialized result (or the request thread when async)
    :rtype: List[LabelBoxV4DataRow]
    """
    # This convenience wrapper always unwraps the ApiResponse down to the
    # parsed payload.
    kwargs['_return_http_data_only'] = True
    if '_preload_content' in kwargs:
        message = (
            "Error! Please call the export_tag_to_label_box_v4_data_rows_with_http_info "
            "method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
        )
        raise ValueError(message)
    return self.export_tag_to_label_box_v4_data_rows_with_http_info(
        dataset_id,
        tag_id,
        expires_in,
        access_control,
        file_name_format,
        include_meta_data,
        format,
        preview_example,
        page_size,
        page_offset,
        **kwargs,
    )  # noqa: E501
@validate_arguments
def export_tag_to_label_box_v4_data_rows_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse:  # noqa: E501
    """export_tag_to_label_box_v4_data_rows  # noqa: E501

    Export samples of a tag as a json for importing into LabelBox as outlined here; https://docs.labelbox.com/v4/reference/image ```openapi\\+warning The image URLs are special in that the resource can be accessed by anyone in possession of said URL for the time specified by the expiresIn query param ```  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.export_tag_to_label_box_v4_data_rows_with_http_info(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param tag_id: ObjectId of the tag (required)
    :type tag_id: str
    :param expires_in: If defined, the URLs provided will only be valid for this amount of seconds from time of issuance. If not defined, the URLs will be valid indefinitely.
    :type expires_in: int
    :param access_control: which access control name to be used
    :type access_control: str
    :param file_name_format:
    :type file_name_format: FileNameFormat
    :param include_meta_data: if true, will also include metadata
    :type include_meta_data: bool
    :param format:
    :type format: FileOutputFormat
    :param preview_example: if true, will generate a preview example of how the structure will look
    :type preview_example: bool
    :param page_size: pagination size/limit of the number of samples to return
    :type page_size: int
    :param page_offset: pagination offset
    :type page_offset: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(List[LabelBoxV4DataRow], status_code(int), headers(HTTPHeaderDict))
    """

    # Snapshot of every declared argument (plus the raw 'kwargs' dict) by
    # name; all lookups below go through this mapping. Must stay the first
    # statement so no locals other than the parameters are captured.
    _params = locals()

    _all_params = [
        'dataset_id',
        'tag_id',
        'expires_in',
        'access_control',
        'file_name_format',
        'include_meta_data',
        'format',
        'preview_example',
        'page_size',
        'page_offset'
    ]
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject any keyword that is neither an endpoint
    # parameter nor a recognized client option, then fold the accepted ones
    # into the flat _params mapping
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method export_tag_to_label_box_v4_data_rows" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters (snake_case args -> camelCase placeholders)
    _path_params = {}
    if _params['dataset_id']:
        _path_params['datasetId'] = _params['dataset_id']

    if _params['tag_id']:
        _path_params['tagId'] = _params['tag_id']

    # process the query parameters; enum members are unwrapped to their raw
    # ``.value`` before serialization
    _query_params = []
    if _params.get('expires_in') is not None:  # noqa: E501
        _query_params.append((
            'expiresIn',
            _params['expires_in'].value if hasattr(_params['expires_in'], 'value') else _params['expires_in']
        ))

    if _params.get('access_control') is not None:  # noqa: E501
        _query_params.append((
            'accessControl',
            _params['access_control'].value if hasattr(_params['access_control'], 'value') else _params['access_control']
        ))

    if _params.get('file_name_format') is not None:  # noqa: E501
        _query_params.append((
            'fileNameFormat',
            _params['file_name_format'].value if hasattr(_params['file_name_format'], 'value') else _params['file_name_format']
        ))

    if _params.get('include_meta_data') is not None:  # noqa: E501
        _query_params.append((
            'includeMetaData',
            _params['include_meta_data'].value if hasattr(_params['include_meta_data'], 'value') else _params['include_meta_data']
        ))

    if _params.get('format') is not None:  # noqa: E501
        _query_params.append((
            'format',
            _params['format'].value if hasattr(_params['format'], 'value') else _params['format']
        ))

    if _params.get('preview_example') is not None:  # noqa: E501
        _query_params.append((
            'previewExample',
            _params['preview_example'].value if hasattr(_params['preview_example'], 'value') else _params['preview_example']
        ))

    if _params.get('page_size') is not None:  # noqa: E501
        _query_params.append((
            'pageSize',
            _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
        ))

    if _params.get('page_offset') is not None:  # noqa: E501
        _query_params.append((
            'pageOffset',
            _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
        ))

    # process the header parameters (caller-supplied via '_headers')
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters (none for this GET endpoint)
    _form_params = []
    _files = {}

    # process the body parameter (GET request: no body)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # map HTTP status codes to the model used for deserializing the response
    _response_types_map = {
        '200': "List[LabelBoxV4DataRow]",
        '400': "ApiErrorResponse",
        '401': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/tags/{tagId}/export/LabelBoxV4/datarows', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def export_tag_to_label_studio_tasks(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[LabelStudioTask]:  # noqa: E501
    """export_tag_to_label_studio_tasks  # noqa: E501

    Export the samples of a tag as JSON tasks for import into LabelStudio
    (see https://labelstud.io/guide/tasks.html#Basic-Label-Studio-JSON-format).
    The image URLs in the export can be accessed by anyone holding them for
    the time given by the ``expiresIn`` query parameter.

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case a thread is returned:

    >>> thread = api.export_tag_to_label_studio_tasks(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param tag_id: ObjectId of the tag (required)
    :type tag_id: str
    :param expires_in: validity of the returned URLs in seconds; URLs never
        expire when omitted
    :type expires_in: int
    :param access_control: which access control name to be used
    :type access_control: str
    :param file_name_format:
    :type file_name_format: FileNameFormat
    :param include_meta_data: if true, will also include metadata
    :type include_meta_data: bool
    :param format:
    :type format: FileOutputFormat
    :param preview_example: if true, generates a preview example of the
        resulting structure
    :type preview_example: bool
    :param page_size: pagination size/limit of the number of samples to return
    :type page_size: int
    :param page_offset: pagination offset
    :type page_offset: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _request_timeout: timeout for this request; a single number is the
        total timeout, a (connection, read) tuple sets both individually
    :raises ValueError: if ``_preload_content`` is passed (use the
        ``*_with_http_info`` variant for raw responses)
    :return: the deserialized result (or the request thread when async)
    :rtype: List[LabelStudioTask]
    """
    # This convenience wrapper always unwraps the ApiResponse down to the
    # parsed payload.
    kwargs['_return_http_data_only'] = True
    if '_preload_content' in kwargs:
        message = (
            "Error! Please call the export_tag_to_label_studio_tasks_with_http_info "
            "method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data"
        )
        raise ValueError(message)
    return self.export_tag_to_label_studio_tasks_with_http_info(
        dataset_id,
        tag_id,
        expires_in,
        access_control,
        file_name_format,
        include_meta_data,
        format,
        preview_example,
        page_size,
        page_offset,
        **kwargs,
    )  # noqa: E501
@validate_arguments
def export_tag_to_label_studio_tasks_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse:  # noqa: E501
    """export_tag_to_label_studio_tasks  # noqa: E501

    Export samples of a tag as a json for importing into LabelStudio as outlined here; https://labelstud.io/guide/tasks.html#Basic-Label-Studio-JSON-format ```openapi\\+warning The image URLs are special in that the resource can be accessed by anyone in possession of said URL for the time specified by the expiresIn query param ```  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.export_tag_to_label_studio_tasks_with_http_info(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, async_req=True)
    >>> result = thread.get()

    :param dataset_id: ObjectId of the dataset (required)
    :type dataset_id: str
    :param tag_id: ObjectId of the tag (required)
    :type tag_id: str
    :param expires_in: If defined, the URLs provided will only be valid for this amount of seconds from time of issuance. If not defined, the URLs will be valid indefinitely.
    :type expires_in: int
    :param access_control: which access control name to be used
    :type access_control: str
    :param file_name_format:
    :type file_name_format: FileNameFormat
    :param include_meta_data: if true, will also include metadata
    :type include_meta_data: bool
    :param format:
    :type format: FileOutputFormat
    :param preview_example: if true, will generate a preview example of how the structure will look
    :type preview_example: bool
    :param page_size: pagination size/limit of the number of samples to return
    :type page_size: int
    :param page_offset: pagination offset
    :type page_offset: int
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the ApiResponse.data will
                             be set to none and raw_data will store the
                             HTTP response body without reading/decoding.
                             Default is True.
    :type _preload_content: bool, optional
    :param _return_http_data_only: response data instead of ApiResponse
                                   object with status code, headers, etc
    :type _return_http_data_only: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :type _content_type: string, optional: force content-type for the request
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(List[LabelStudioTask], status_code(int), headers(HTTPHeaderDict))
    """

    # Snapshot of every declared argument (plus the raw 'kwargs' dict) by
    # name; all lookups below go through this mapping. Must stay the first
    # statement so no locals other than the parameters are captured.
    _params = locals()

    _all_params = [
        'dataset_id',
        'tag_id',
        'expires_in',
        'access_control',
        'file_name_format',
        'include_meta_data',
        'format',
        'preview_example',
        'page_size',
        'page_offset'
    ]
    _all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth',
            '_content_type',
            '_headers'
        ]
    )

    # validate the arguments: reject any keyword that is neither an endpoint
    # parameter nor a recognized client option, then fold the accepted ones
    # into the flat _params mapping
    for _key, _val in _params['kwargs'].items():
        if _key not in _all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method export_tag_to_label_studio_tasks" % _key
            )
        _params[_key] = _val
    del _params['kwargs']

    _collection_formats = {}

    # process the path parameters (snake_case args -> camelCase placeholders)
    _path_params = {}
    if _params['dataset_id']:
        _path_params['datasetId'] = _params['dataset_id']

    if _params['tag_id']:
        _path_params['tagId'] = _params['tag_id']

    # process the query parameters; enum members are unwrapped to their raw
    # ``.value`` before serialization
    _query_params = []
    if _params.get('expires_in') is not None:  # noqa: E501
        _query_params.append((
            'expiresIn',
            _params['expires_in'].value if hasattr(_params['expires_in'], 'value') else _params['expires_in']
        ))

    if _params.get('access_control') is not None:  # noqa: E501
        _query_params.append((
            'accessControl',
            _params['access_control'].value if hasattr(_params['access_control'], 'value') else _params['access_control']
        ))

    if _params.get('file_name_format') is not None:  # noqa: E501
        _query_params.append((
            'fileNameFormat',
            _params['file_name_format'].value if hasattr(_params['file_name_format'], 'value') else _params['file_name_format']
        ))

    if _params.get('include_meta_data') is not None:  # noqa: E501
        _query_params.append((
            'includeMetaData',
            _params['include_meta_data'].value if hasattr(_params['include_meta_data'], 'value') else _params['include_meta_data']
        ))

    if _params.get('format') is not None:  # noqa: E501
        _query_params.append((
            'format',
            _params['format'].value if hasattr(_params['format'], 'value') else _params['format']
        ))

    if _params.get('preview_example') is not None:  # noqa: E501
        _query_params.append((
            'previewExample',
            _params['preview_example'].value if hasattr(_params['preview_example'], 'value') else _params['preview_example']
        ))

    if _params.get('page_size') is not None:  # noqa: E501
        _query_params.append((
            'pageSize',
            _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
        ))

    if _params.get('page_offset') is not None:  # noqa: E501
        _query_params.append((
            'pageOffset',
            _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
        ))

    # process the header parameters (caller-supplied via '_headers')
    _header_params = dict(_params.get('_headers', {}))

    # process the form parameters (none for this GET endpoint)
    _form_params = []
    _files = {}

    # process the body parameter (GET request: no body)
    _body_params = None

    # set the HTTP header `Accept`
    _header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # authentication setting
    _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501

    # map HTTP status codes to the model used for deserializing the response
    _response_types_map = {
        '200': "List[LabelStudioTask]",
        '400': "ApiErrorResponse",
        '401': "ApiErrorResponse",
        '403': "ApiErrorResponse",
        '404': "ApiErrorResponse",
    }

    return self.api_client.call_api(
        '/v1/datasets/{datasetId}/tags/{tagId}/export/LabelStudio/tasks', 'GET',
        _path_params,
        _query_params,
        _header_params,
        body=_body_params,
        post_params=_form_params,
        files=_files,
        response_types_map=_response_types_map,
        auth_settings=_auth_settings,
        async_req=_params.get('async_req'),
        _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=_params.get('_preload_content', True),
        _request_timeout=_params.get('_request_timeout'),
        collection_formats=_collection_formats,
        _request_auth=_params.get('_request_auth'))
@validate_arguments
def export_tag_to_sama_tasks(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> List[SamaTask]: # noqa: E501
"""export_tag_to_sama_tasks # noqa: E501
Export samples of a tag as a json for importing into Sama as tasks with the upload form or via the API as outlined here; - https://docs.sama.com/reference/taskcreate - https://docs.sama.com/reference/createbatch ```openapi\\+warning The image URLs are special in that the resource can be accessed by anyone in possession of said URL for the time specified by the expiresIn query param ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.export_tag_to_sama_tasks(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param expires_in: If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely.
:type expires_in: int
:param access_control: which access control name to be used
:type access_control: str
:param file_name_format:
:type file_name_format: FileNameFormat
:param include_meta_data: if true, will also include metadata
:type include_meta_data: bool
:param format:
:type format: FileOutputFormat
:param preview_example: if true, will generate a preview example of how the structure will look
:type preview_example: bool
:param page_size: pagination size/limit of the number of samples to return
:type page_size: int
:param page_offset: pagination offset
:type page_offset: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[SamaTask]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the export_tag_to_sama_tasks_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.export_tag_to_sama_tasks_with_http_info(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, **kwargs) # noqa: E501
    @validate_arguments
    def export_tag_to_sama_tasks_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], expires_in : Annotated[Optional[StrictInt], Field(description="If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely. ")] = None, access_control : Annotated[Optional[StrictStr], Field(description="which access control name to be used")] = None, file_name_format : Optional[FileNameFormat] = None, include_meta_data : Annotated[Optional[StrictBool], Field(description="if true, will also include metadata")] = None, format : Optional[FileOutputFormat] = None, preview_example : Annotated[Optional[StrictBool], Field(description="if true, will generate a preview example of how the structure will look")] = None, page_size : Annotated[Optional[conint(strict=True, ge=1)], Field(description="pagination size/limit of the number of samples to return")] = None, page_offset : Annotated[Optional[conint(strict=True, ge=0)], Field(description="pagination offset")] = None, **kwargs) -> ApiResponse:  # noqa: E501
        """export_tag_to_sama_tasks  # noqa: E501
        Export samples of a tag as a json for importing into Sama as tasks with the upload form or via the API as outlined here; - https://docs.sama.com/reference/taskcreate - https://docs.sama.com/reference/createbatch ```openapi\\+warning The image URLs are special in that the resource can be accessed by anyone in possession of said URL for the time specified by the expiresIn query param ```  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.export_tag_to_sama_tasks_with_http_info(dataset_id, tag_id, expires_in, access_control, file_name_format, include_meta_data, format, preview_example, page_size, page_offset, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_id: ObjectId of the tag (required)
        :type tag_id: str
        :param expires_in: If defined, the URLs provided will only be valid for amount of seconds from time of issuence. If not defined, the URls will be valid indefinitely.
        :type expires_in: int
        :param access_control: which access control name to be used
        :type access_control: str
        :param file_name_format:
        :type file_name_format: FileNameFormat
        :param include_meta_data: if true, will also include metadata
        :type include_meta_data: bool
        :param format:
        :type format: FileOutputFormat
        :param preview_example: if true, will generate a preview example of how the structure will look
        :type preview_example: bool
        :param page_size: pagination size/limit of the number of samples to return
        :type page_size: int
        :param page_offset: pagination offset
        :type page_offset: int
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[SamaTask], status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot every declared parameter (plus the `kwargs` dict) by name.
        # NOTE: locals() must be called before any other local is bound, since
        # the rest of the method reads arguments exclusively through _params.
        _params = locals()
        # Names accepted positionally/by keyword for this endpoint.
        _all_params = [
            'dataset_id',
            'tag_id',
            'expires_in',
            'access_control',
            'file_name_format',
            'include_meta_data',
            'format',
            'preview_example',
            'page_size',
            'page_offset'
        ]
        # Generic per-request options accepted by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method export_tag_to_sama_tasks" % _key
                )
            _params[_key] = _val
        # kwargs have been merged into _params; drop the nested dict.
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['tag_id']:
            _path_params['tagId'] = _params['tag_id']
        # process the query parameters
        # (enum members are serialized via their `.value`; plain values pass through)
        _query_params = []
        if _params.get('expires_in') is not None:  # noqa: E501
            _query_params.append((
                'expiresIn',
                _params['expires_in'].value if hasattr(_params['expires_in'], 'value') else _params['expires_in']
            ))
        if _params.get('access_control') is not None:  # noqa: E501
            _query_params.append((
                'accessControl',
                _params['access_control'].value if hasattr(_params['access_control'], 'value') else _params['access_control']
            ))
        if _params.get('file_name_format') is not None:  # noqa: E501
            _query_params.append((
                'fileNameFormat',
                _params['file_name_format'].value if hasattr(_params['file_name_format'], 'value') else _params['file_name_format']
            ))
        if _params.get('include_meta_data') is not None:  # noqa: E501
            _query_params.append((
                'includeMetaData',
                _params['include_meta_data'].value if hasattr(_params['include_meta_data'], 'value') else _params['include_meta_data']
            ))
        if _params.get('format') is not None:  # noqa: E501
            _query_params.append((
                'format',
                _params['format'].value if hasattr(_params['format'], 'value') else _params['format']
            ))
        if _params.get('preview_example') is not None:  # noqa: E501
            _query_params.append((
                'previewExample',
                _params['preview_example'].value if hasattr(_params['preview_example'], 'value') else _params['preview_example']
            ))
        if _params.get('page_size') is not None:  # noqa: E501
            _query_params.append((
                'pageSize',
                _params['page_size'].value if hasattr(_params['page_size'], 'value') else _params['page_size']
            ))
        if _params.get('page_offset') is not None:  # noqa: E501
            _query_params.append((
                'pageOffset',
                _params['page_offset'].value if hasattr(_params['page_offset'], 'value') else _params['page_offset']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "List[SamaTask]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/{tagId}/export/Sama/tasks', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_filenames_by_tag_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> List[str]: # noqa: E501
"""(Deprecated) get_filenames_by_tag_id # noqa: E501
Get list of filenames by tag. Deprecated, please use # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_filenames_by_tag_id(dataset_id, tag_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[str]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_filenames_by_tag_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_filenames_by_tag_id_with_http_info(dataset_id, tag_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_filenames_by_tag_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> ApiResponse:  # noqa: E501
        """(Deprecated) get_filenames_by_tag_id  # noqa: E501
        Get list of filenames by tag. Deprecated, please use  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_filenames_by_tag_id_with_http_info(dataset_id, tag_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_id: ObjectId of the tag (required)
        :type tag_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[str], status_code(int), headers(HTTPHeaderDict))
        """
        # Surface the deprecation of this endpoint to callers.
        warnings.warn("GET /v1/datasets/{datasetId}/tags/{tagId}/filenames is deprecated.", DeprecationWarning)
        # Snapshot every declared parameter (plus the `kwargs` dict) by name;
        # must stay immediately after the warning, before other locals.
        _params = locals()
        _all_params = [
            'dataset_id',
            'tag_id'
        ]
        # Generic per-request options accepted by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_filenames_by_tag_id" % _key
                )
            _params[_key] = _val
        # kwargs have been merged into _params; drop the nested dict.
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['tag_id']:
            _path_params['tagId'] = _params['tag_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['text/plain', 'application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "List[str]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/{tagId}/filenames', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_tag_by_tag_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> TagData: # noqa: E501
"""get_tag_by_tag_id # noqa: E501
Get information about a specific tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tag_by_tag_id(dataset_id, tag_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: TagData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_tag_by_tag_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_tag_by_tag_id_with_http_info(dataset_id, tag_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_tag_by_tag_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_tag_by_tag_id  # noqa: E501
        Get information about a specific tag  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_tag_by_tag_id_with_http_info(dataset_id, tag_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_id: ObjectId of the tag (required)
        :type tag_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(TagData, status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot every declared parameter (plus the `kwargs` dict) by name;
        # must be called before any other local is bound.
        _params = locals()
        _all_params = [
            'dataset_id',
            'tag_id'
        ]
        # Generic per-request options accepted by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_tag_by_tag_id" % _key
                )
            _params[_key] = _val
        # kwargs have been merged into _params; drop the nested dict.
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['tag_id']:
            _path_params['tagId'] = _params['tag_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "TagData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/{tagId}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_tags_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> List[TagData]: # noqa: E501
"""get_tags_by_dataset_id # noqa: E501
Get all tags of a dataset # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tags_by_dataset_id(dataset_id, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[TagData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_tags_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_tags_by_dataset_id_with_http_info(dataset_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_tags_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], **kwargs) -> ApiResponse:  # noqa: E501
        """get_tags_by_dataset_id  # noqa: E501
        Get all tags of a dataset  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_tags_by_dataset_id_with_http_info(dataset_id, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[TagData], status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot every declared parameter (plus the `kwargs` dict) by name;
        # must be called before any other local is bound.
        _params = locals()
        _all_params = [
            'dataset_id'
        ]
        # Generic per-request options accepted by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_tags_by_dataset_id" % _key
                )
            _params[_key] = _val
        # kwargs have been merged into _params; drop the nested dict.
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (GET request: no body)
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "List[TagData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def perform_tag_arithmetics(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_arithmetics_request : TagArithmeticsRequest, **kwargs) -> TagArithmeticsResponse: # noqa: E501
"""perform_tag_arithmetics # noqa: E501
performs tag arithmetics to compute a new bitmask out of two existing tags and optionally create a tag for it # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.perform_tag_arithmetics(dataset_id, tag_arithmetics_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_arithmetics_request: (required)
:type tag_arithmetics_request: TagArithmeticsRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: TagArithmeticsResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the perform_tag_arithmetics_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.perform_tag_arithmetics_with_http_info(dataset_id, tag_arithmetics_request, **kwargs) # noqa: E501
    @validate_arguments
    def perform_tag_arithmetics_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_arithmetics_request : TagArithmeticsRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """perform_tag_arithmetics  # noqa: E501
        performs tag arithmetics to compute a new bitmask out of two existing tags and optionally create a tag for it  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.perform_tag_arithmetics_with_http_info(dataset_id, tag_arithmetics_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_arithmetics_request: (required)
        :type tag_arithmetics_request: TagArithmeticsRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(TagArithmeticsResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot every declared parameter (plus the `kwargs` dict) by name;
        # must be called before any other local is bound.
        _params = locals()
        _all_params = [
            'dataset_id',
            'tag_arithmetics_request'
        ]
        # Generic per-request options accepted by every endpoint.
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method perform_tag_arithmetics" % _key
                )
            _params[_key] = _val
        # kwargs have been merged into _params; drop the nested dict.
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter (POST: the request model is the JSON body)
        _body_params = None
        if _params['tag_arithmetics_request'] is not None:
            _body_params = _params['tag_arithmetics_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map HTTP status codes to the model used to deserialize the response
        _response_types_map = {
            '200': "TagArithmeticsResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/arithmetics', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def perform_tag_arithmetics_bitmask(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_arithmetics_request : TagArithmeticsRequest, **kwargs) -> TagBitMaskResponse: # noqa: E501
"""(Deprecated) perform_tag_arithmetics_bitmask # noqa: E501
Performs tag arithmetics to compute a new bitmask out of two existing tags. Does not create a new tag regardless if newTagName is provided # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.perform_tag_arithmetics_bitmask(dataset_id, tag_arithmetics_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_arithmetics_request: (required)
:type tag_arithmetics_request: TagArithmeticsRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: TagBitMaskResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the perform_tag_arithmetics_bitmask_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.perform_tag_arithmetics_bitmask_with_http_info(dataset_id, tag_arithmetics_request, **kwargs) # noqa: E501
    @validate_arguments
    def perform_tag_arithmetics_bitmask_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_arithmetics_request : TagArithmeticsRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """(Deprecated) perform_tag_arithmetics_bitmask # noqa: E501
        Performs tag arithmetics to compute a new bitmask out of two existing tags. Does not create a new tag regardless if newTagName is provided # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.perform_tag_arithmetics_bitmask_with_http_info(dataset_id, tag_arithmetics_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_arithmetics_request: (required)
        :type tag_arithmetics_request: TagArithmeticsRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                If the method is called asynchronously,
                returns the request thread.
        :rtype: tuple(TagBitMaskResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # The endpoint is deprecated; warn every caller at runtime.
        warnings.warn("POST /v1/datasets/{datasetId}/tags/arithmetics/bitmask is deprecated.", DeprecationWarning)
        # locals() snapshot: maps the declared parameter names (plus 'self'
        # and 'kwargs') to their values; all argument access below goes
        # through this dict, so local names must not be changed.
        _params = locals()
        _all_params = [
            'dataset_id',
            'tag_arithmetics_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method perform_tag_arithmetics_bitmask" % _key
                )
            _params[_key] = _val
        # merge validated kwargs into _params, then drop the raw dict
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['tag_arithmetics_request'] is not None:
            _body_params = _params['tag_arithmetics_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map of HTTP status code -> model class name used for deserialization
        _response_types_map = {
            '200': "TagBitMaskResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the HTTP call (and optional async dispatch) to ApiClient
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/arithmetics/bitmask', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def update_tag_by_tag_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], tag_update_request : Annotated[TagUpdateRequest, Field(..., description="updated data for tag")], **kwargs) -> None: # noqa: E501
"""update_tag_by_tag_id # noqa: E501
update information about a specific tag # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_tag_by_tag_id(dataset_id, tag_id, tag_update_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_id: ObjectId of the tag (required)
:type tag_id: str
:param tag_update_request: updated data for tag (required)
:type tag_update_request: TagUpdateRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the update_tag_by_tag_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.update_tag_by_tag_id_with_http_info(dataset_id, tag_id, tag_update_request, **kwargs) # noqa: E501
    @validate_arguments
    def update_tag_by_tag_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the tag")], tag_update_request : Annotated[TagUpdateRequest, Field(..., description="updated data for tag")], **kwargs) -> ApiResponse:  # noqa: E501
        """update_tag_by_tag_id # noqa: E501
        update information about a specific tag # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_tag_by_tag_id_with_http_info(dataset_id, tag_id, tag_update_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_id: ObjectId of the tag (required)
        :type tag_id: str
        :param tag_update_request: updated data for tag (required)
        :type tag_update_request: TagUpdateRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                If the method is called asynchronously,
                returns the request thread.
        :rtype: None
        """
        # locals() snapshot: maps the declared parameter names (plus 'self'
        # and 'kwargs') to their values; all argument access below goes
        # through this dict, so local names must not be changed.
        _params = locals()
        _all_params = [
            'dataset_id',
            'tag_id',
            'tag_update_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_tag_by_tag_id" % _key
                )
            _params[_key] = _val
        # merge validated kwargs into _params, then drop the raw dict
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        if _params['tag_id']:
            _path_params['tagId'] = _params['tag_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['tag_update_request'] is not None:
            _body_params = _params['tag_update_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # empty map: no response body is deserialized for this endpoint
        _response_types_map = {}
        # delegate the HTTP call (and optional async dispatch) to ApiClient
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/{tagId}', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def upsize_tags_by_dataset_id(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_upsize_request : TagUpsizeRequest, **kwargs) -> CreateEntityResponse: # noqa: E501
"""upsize_tags_by_dataset_id # noqa: E501
Upsize all tags for the dataset to the current size of the dataset. Use this after adding more samples to a dataset with an initial-tag. | Creates a new tag holding all samples which are not yet in the initial-tag. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upsize_tags_by_dataset_id(dataset_id, tag_upsize_request, async_req=True)
>>> result = thread.get()
:param dataset_id: ObjectId of the dataset (required)
:type dataset_id: str
:param tag_upsize_request: (required)
:type tag_upsize_request: TagUpsizeRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: CreateEntityResponse
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the upsize_tags_by_dataset_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.upsize_tags_by_dataset_id_with_http_info(dataset_id, tag_upsize_request, **kwargs) # noqa: E501
    @validate_arguments
    def upsize_tags_by_dataset_id_with_http_info(self, dataset_id : Annotated[constr(strict=True), Field(..., description="ObjectId of the dataset")], tag_upsize_request : TagUpsizeRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """upsize_tags_by_dataset_id # noqa: E501
        Upsize all tags for the dataset to the current size of the dataset. Use this after adding more samples to a dataset with an initial-tag. | Creates a new tag holding all samples which are not yet in the initial-tag. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.upsize_tags_by_dataset_id_with_http_info(dataset_id, tag_upsize_request, async_req=True)
        >>> result = thread.get()
        :param dataset_id: ObjectId of the dataset (required)
        :type dataset_id: str
        :param tag_upsize_request: (required)
        :type tag_upsize_request: TagUpsizeRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                If the method is called asynchronously,
                returns the request thread.
        :rtype: tuple(CreateEntityResponse, status_code(int), headers(HTTPHeaderDict))
        """
        # locals() snapshot: maps the declared parameter names (plus 'self'
        # and 'kwargs') to their values; all argument access below goes
        # through this dict, so local names must not be changed.
        _params = locals()
        _all_params = [
            'dataset_id',
            'tag_upsize_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method upsize_tags_by_dataset_id" % _key
                )
            _params[_key] = _val
        # merge validated kwargs into _params, then drop the raw dict
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['dataset_id']:
            _path_params['datasetId'] = _params['dataset_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['tag_upsize_request'] is not None:
            _body_params = _params['tag_upsize_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # map of HTTP status code -> model class name used for deserialization
        _response_types_map = {
            '201': "CreateEntityResponse",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the HTTP call (and optional async dispatch) to ApiClient
        return self.api_client.call_api(
            '/v1/datasets/{datasetId}/tags/upsize', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 160,062 | 49.797525 | 1,247 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/teams_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import Field, StrictStr, constr, validator
from typing import List
from lightly.openapi_generated.swagger_client.models.create_team_membership_request import CreateTeamMembershipRequest
from lightly.openapi_generated.swagger_client.models.profile_basic_data import ProfileBasicData
from lightly.openapi_generated.swagger_client.models.service_account_basic_data import ServiceAccountBasicData
from lightly.openapi_generated.swagger_client.models.team_data import TeamData
from lightly.openapi_generated.swagger_client.models.update_team_membership_request import UpdateTeamMembershipRequest
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class TeamsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient.get_default()
self.api_client = api_client
@validate_arguments
def add_team_member(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], create_team_membership_request : CreateTeamMembershipRequest, **kwargs) -> None: # noqa: E501
"""add_team_member # noqa: E501
Add a team member. One needs to be part of the team to do so. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_team_member(team_id, create_team_membership_request, async_req=True)
>>> result = thread.get()
:param team_id: id of the team (required)
:type team_id: str
:param create_team_membership_request: (required)
:type create_team_membership_request: CreateTeamMembershipRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the add_team_member_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.add_team_member_with_http_info(team_id, create_team_membership_request, **kwargs) # noqa: E501
    @validate_arguments
    def add_team_member_with_http_info(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], create_team_membership_request : CreateTeamMembershipRequest, **kwargs) -> ApiResponse:  # noqa: E501
        """add_team_member # noqa: E501
        Add a team member. One needs to be part of the team to do so. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.add_team_member_with_http_info(team_id, create_team_membership_request, async_req=True)
        >>> result = thread.get()
        :param team_id: id of the team (required)
        :type team_id: str
        :param create_team_membership_request: (required)
        :type create_team_membership_request: CreateTeamMembershipRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                If the method is called asynchronously,
                returns the request thread.
        :rtype: None
        """
        # locals() snapshot: maps the declared parameter names (plus 'self'
        # and 'kwargs') to their values; all argument access below goes
        # through this dict, so local names must not be changed.
        _params = locals()
        _all_params = [
            'team_id',
            'create_team_membership_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_team_member" % _key
                )
            _params[_key] = _val
        # merge validated kwargs into _params, then drop the raw dict
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['team_id']:
            _path_params['teamId'] = _params['team_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['create_team_membership_request'] is not None:
            _body_params = _params['create_team_membership_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # empty map: no response body is deserialized for this endpoint
        _response_types_map = {}
        # delegate the HTTP call (and optional async dispatch) to ApiClient
        return self.api_client.call_api(
            '/v1/teams/{teamId}/members', 'POST',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def delete_team_member_by_id(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], user_id : Annotated[StrictStr, Field(..., description="id of the user")], **kwargs) -> None: # noqa: E501
"""delete_team_member_by_id # noqa: E501
Deletes a member from a team. One needs to be part of the team to do so. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_team_member_by_id(team_id, user_id, async_req=True)
>>> result = thread.get()
:param team_id: id of the team (required)
:type team_id: str
:param user_id: id of the user (required)
:type user_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the delete_team_member_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.delete_team_member_by_id_with_http_info(team_id, user_id, **kwargs) # noqa: E501
    @validate_arguments
    def delete_team_member_by_id_with_http_info(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], user_id : Annotated[StrictStr, Field(..., description="id of the user")], **kwargs) -> ApiResponse:  # noqa: E501
        """delete_team_member_by_id # noqa: E501
        Deletes a member from a team. One needs to be part of the team to do so. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_team_member_by_id_with_http_info(team_id, user_id, async_req=True)
        >>> result = thread.get()
        :param team_id: id of the team (required)
        :type team_id: str
        :param user_id: id of the user (required)
        :type user_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                If the method is called asynchronously,
                returns the request thread.
        :rtype: None
        """
        # locals() snapshot: maps the declared parameter names (plus 'self'
        # and 'kwargs') to their values; all argument access below goes
        # through this dict, so local names must not be changed.
        _params = locals()
        _all_params = [
            'team_id',
            'user_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_team_member_by_id" % _key
                )
            _params[_key] = _val
        # merge validated kwargs into _params, then drop the raw dict
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        if _params['team_id']:
            _path_params['teamId'] = _params['team_id']
        if _params['user_id']:
            _path_params['userId'] = _params['user_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        # DELETE request: no request body, so no Content-Type header is set
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth']  # noqa: E501
        # empty map: no response body is deserialized for this endpoint
        _response_types_map = {}
        # delegate the HTTP call (and optional async dispatch) to ApiClient
        return self.api_client.call_api(
            '/v1/teams/{teamId}/members/{userId}', 'DELETE',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_service_accounts_by_team_id(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], **kwargs) -> List[ServiceAccountBasicData]: # noqa: E501
"""get_service_accounts_by_team_id # noqa: E501
Get the service accounts of a team. One needs to be part of the team to do so. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_service_accounts_by_team_id(team_id, async_req=True)
>>> result = thread.get()
:param team_id: id of the team (required)
:type team_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[ServiceAccountBasicData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_service_accounts_by_team_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_service_accounts_by_team_id_with_http_info(team_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_service_accounts_by_team_id_with_http_info(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], **kwargs) -> ApiResponse: # noqa: E501
        """get_service_accounts_by_team_id # noqa: E501
        Get the service accounts of a team. One needs to be part of the team to do so. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_service_accounts_by_team_id_with_http_info(team_id, async_req=True)
        >>> result = thread.get()
        :param team_id: id of the team (required)
        :type team_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[ServiceAccountBasicData], status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot the named arguments; validated **kwargs entries are merged in below.
        _params = locals()
        _all_params = [
            'team_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject any kwarg not in the allow-list above
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_service_accounts_by_team_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): truthiness check — a falsy team_id (e.g. "") would be
        # silently omitted from the path; presumably prevented by upstream validation.
        if _params['team_id']:
            _path_params['teamId'] = _params['team_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # map HTTP status codes to the model used to deserialize each response
        _response_types_map = {
            '200': "List[ServiceAccountBasicData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the actual HTTP round-trip to the shared ApiClient
        return self.api_client.call_api(
            '/v1/teams/{teamId}/serviceaccounts', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_team_by_id(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], **kwargs) -> TeamData: # noqa: E501
"""get_team_by_id # noqa: E501
Get basic team information by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_team_by_id(team_id, async_req=True)
>>> result = thread.get()
:param team_id: id of the team (required)
:type team_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: TeamData
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_team_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_team_by_id_with_http_info(team_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_team_by_id_with_http_info(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], **kwargs) -> ApiResponse: # noqa: E501
        """get_team_by_id # noqa: E501
        Get basic team information by ID. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_team_by_id_with_http_info(team_id, async_req=True)
        >>> result = thread.get()
        :param team_id: id of the team (required)
        :type team_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(TeamData, status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot the named arguments; validated **kwargs entries are merged in below.
        _params = locals()
        _all_params = [
            'team_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject any kwarg not in the allow-list above
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_team_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): truthiness check — a falsy team_id (e.g. "") would be
        # silently omitted from the path; presumably prevented by upstream validation.
        if _params['team_id']:
            _path_params['teamId'] = _params['team_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # map HTTP status codes to the model used to deserialize each response
        _response_types_map = {
            '200': "TeamData",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the actual HTTP round-trip to the shared ApiClient
        return self.api_client.call_api(
            '/v1/teams/{teamId}', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def get_team_members_by_id(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], **kwargs) -> List[ProfileBasicData]: # noqa: E501
"""get_team_members_by_id # noqa: E501
Get the members of a team. One needs to be part of the team to do so. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_team_members_by_id(team_id, async_req=True)
>>> result = thread.get()
:param team_id: id of the team (required)
:type team_id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: List[ProfileBasicData]
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the get_team_members_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.get_team_members_by_id_with_http_info(team_id, **kwargs) # noqa: E501
    @validate_arguments
    def get_team_members_by_id_with_http_info(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], **kwargs) -> ApiResponse: # noqa: E501
        """get_team_members_by_id # noqa: E501
        Get the members of a team. One needs to be part of the team to do so. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_team_members_by_id_with_http_info(team_id, async_req=True)
        >>> result = thread.get()
        :param team_id: id of the team (required)
        :type team_id: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(List[ProfileBasicData], status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot the named arguments; validated **kwargs entries are merged in below.
        _params = locals()
        _all_params = [
            'team_id'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject any kwarg not in the allow-list above
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_team_members_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): truthiness check — a falsy team_id (e.g. "") would be
        # silently omitted from the path; presumably prevented by upstream validation.
        if _params['team_id']:
            _path_params['teamId'] = _params['team_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # map HTTP status codes to the model used to deserialize each response
        _response_types_map = {
            '200': "List[ProfileBasicData]",
            '400': "ApiErrorResponse",
            '401': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the actual HTTP round-trip to the shared ApiClient
        return self.api_client.call_api(
            '/v1/teams/{teamId}/members', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
@validate_arguments
def update_team_member_by_id(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], user_id : Annotated[StrictStr, Field(..., description="id of the user")], update_team_membership_request : UpdateTeamMembershipRequest, **kwargs) -> None: # noqa: E501
"""update_team_member_by_id # noqa: E501
Update the team membership of a user. One needs to be part of the team to do so. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_team_member_by_id(team_id, user_id, update_team_membership_request, async_req=True)
>>> result = thread.get()
:param team_id: id of the team (required)
:type team_id: str
:param user_id: id of the user (required)
:type user_id: str
:param update_team_membership_request: (required)
:type update_team_membership_request: UpdateTeamMembershipRequest
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
if '_preload_content' in kwargs:
raise ValueError("Error! Please call the update_team_member_by_id_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
return self.update_team_member_by_id_with_http_info(team_id, user_id, update_team_membership_request, **kwargs) # noqa: E501
    @validate_arguments
    def update_team_member_by_id_with_http_info(self, team_id : Annotated[constr(strict=True), Field(..., description="id of the team")], user_id : Annotated[StrictStr, Field(..., description="id of the user")], update_team_membership_request : UpdateTeamMembershipRequest, **kwargs) -> ApiResponse: # noqa: E501
        """update_team_member_by_id # noqa: E501
        Update the team membership of a user. One needs to be part of the team to do so. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_team_member_by_id_with_http_info(team_id, user_id, update_team_membership_request, async_req=True)
        >>> result = thread.get()
        :param team_id: id of the team (required)
        :type team_id: str
        :param user_id: id of the user (required)
        :type user_id: str
        :param update_team_membership_request: (required)
        :type update_team_membership_request: UpdateTeamMembershipRequest
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: None
        """
        # Snapshot the named arguments; validated **kwargs entries are merged in below.
        _params = locals()
        _all_params = [
            'team_id',
            'user_id',
            'update_team_membership_request'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject any kwarg not in the allow-list above
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_team_member_by_id" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # NOTE(review): truthiness checks — falsy ids (e.g. "") would be
        # silently omitted from the path; presumably prevented by upstream validation.
        if _params['team_id']:
            _path_params['teamId'] = _params['team_id']
        if _params['user_id']:
            _path_params['userId'] = _params['user_id']
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        if _params['update_team_membership_request'] is not None:
            _body_params = _params['update_team_membership_request']
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # set the HTTP header `Content-Type`; an explicit _content_type kwarg overrides negotiation
        _content_types_list = _params.get('_content_type',
            self.api_client.select_header_content_type(
                ['application/json']))
        if _content_types_list:
            _header_params['Content-Type'] = _content_types_list
        # authentication setting
        _auth_settings = ['auth0Bearer', 'ApiKeyAuth'] # noqa: E501
        # empty map: no response body is deserialized for this endpoint
        _response_types_map = {}
        # delegate the actual HTTP round-trip to the shared ApiClient
        return self.api_client.call_api(
            '/v1/teams/{teamId}/members/{userId}', 'PUT',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 41,554 | 43.301706 | 313 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/api/versioning_api.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import re # noqa: F401
import io
import warnings
from pydantic import validate_arguments, ValidationError
from typing_extensions import Annotated
from pydantic import StrictStr
from typing import Optional
from lightly.openapi_generated.swagger_client.api_client import ApiClient
from lightly.openapi_generated.swagger_client.api_response import ApiResponse
from lightly.openapi_generated.swagger_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class VersioningApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    def __init__(self, api_client=None):
        # Fall back to the process-wide default ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient.get_default()
        self.api_client = api_client
    @validate_arguments
    def get_latest_pip_version(self, current_version : Optional[StrictStr] = None, **kwargs) -> str: # noqa: E501
        """get_latest_pip_version # noqa: E501
        Get latest pip version available # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_latest_pip_version(current_version, async_req=True)
        >>> result = thread.get()
        :param current_version:
        :type current_version: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: str
        """
        # Convenience wrapper: return only the deserialized body, not ApiResponse.
        kwargs['_return_http_data_only'] = True
        if '_preload_content' in kwargs:
            raise ValueError("Error! Please call the get_latest_pip_version_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
        return self.get_latest_pip_version_with_http_info(current_version, **kwargs) # noqa: E501
    @validate_arguments
    def get_latest_pip_version_with_http_info(self, current_version : Optional[StrictStr] = None, **kwargs) -> ApiResponse: # noqa: E501
        """get_latest_pip_version # noqa: E501
        Get latest pip version available # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_latest_pip_version_with_http_info(current_version, async_req=True)
        >>> result = thread.get()
        :param current_version:
        :type current_version: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot the named arguments; validated **kwargs entries are merged in below.
        _params = locals()
        _all_params = [
            'current_version'
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject any kwarg not in the allow-list above
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_latest_pip_version" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # process the query parameters
        _query_params = []
        if _params.get('current_version') is not None: # noqa: E501
            # Enum-like values expose `.value`; plain strings pass through unchanged.
            _query_params.append((
                'currentVersion',
                _params['current_version'].value if hasattr(_params['current_version'], 'value') else _params['current_version']
            ))
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting: public endpoint, no auth required
        _auth_settings = [] # noqa: E501
        # map HTTP status codes to the model used to deserialize each response
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the actual HTTP round-trip to the shared ApiClient
        return self.api_client.call_api(
            '/v1/versions/pip/latest', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
    @validate_arguments
    def get_minimum_compatible_pip_version(self, **kwargs) -> str: # noqa: E501
        """get_minimum_compatible_pip_version # noqa: E501
        Get minimum pip version needed for compatability # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_minimum_compatible_pip_version(async_req=True)
        >>> result = thread.get()
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: str
        """
        # Convenience wrapper: return only the deserialized body, not ApiResponse.
        kwargs['_return_http_data_only'] = True
        if '_preload_content' in kwargs:
            raise ValueError("Error! Please call the get_minimum_compatible_pip_version_with_http_info method with `_preload_content` instead and obtain raw data from ApiResponse.raw_data")
        return self.get_minimum_compatible_pip_version_with_http_info(**kwargs) # noqa: E501
    @validate_arguments
    def get_minimum_compatible_pip_version_with_http_info(self, **kwargs) -> ApiResponse: # noqa: E501
        """get_minimum_compatible_pip_version # noqa: E501
        Get minimum pip version needed for compatability # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_minimum_compatible_pip_version_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, the ApiResponse.data will
                                 be set to none and raw_data will store the
                                 HTTP response body without reading/decoding.
                                 Default is True.
        :type _preload_content: bool, optional
        :param _return_http_data_only: response data instead of ApiResponse
                                       object with status code, headers, etc
        :type _return_http_data_only: bool, optional
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for an a single
                              request; this effectively ignores the authentication
                              in the spec for a single request.
        :type _request_auth: dict, optional
        :type _content_type: string, optional: force content-type for the request
        :return: Returns the result object.
                 If the method is called asynchronously,
                 returns the request thread.
        :rtype: tuple(str, status_code(int), headers(HTTPHeaderDict))
        """
        # Snapshot the named arguments; validated **kwargs entries are merged in below.
        _params = locals()
        _all_params = [
        ]
        _all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth',
                '_content_type',
                '_headers'
            ]
        )
        # validate the arguments: reject any kwarg not in the allow-list above
        for _key, _val in _params['kwargs'].items():
            if _key not in _all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_minimum_compatible_pip_version" % _key
                )
            _params[_key] = _val
        del _params['kwargs']
        _collection_formats = {}
        # process the path parameters
        _path_params = {}
        # process the query parameters
        _query_params = []
        # process the header parameters
        _header_params = dict(_params.get('_headers', {}))
        # process the form parameters
        _form_params = []
        _files = {}
        # process the body parameter
        _body_params = None
        # set the HTTP header `Accept`
        _header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json']) # noqa: E501
        # authentication setting: public endpoint, no auth required
        _auth_settings = [] # noqa: E501
        # map HTTP status codes to the model used to deserialize each response
        _response_types_map = {
            '200': "str",
            '400': "ApiErrorResponse",
            '403': "ApiErrorResponse",
            '404': "ApiErrorResponse",
        }
        # delegate the actual HTTP round-trip to the shared ApiClient
        return self.api_client.call_api(
            '/v1/versions/pip/minimum', 'GET',
            _path_params,
            _query_params,
            _header_params,
            body=_body_params,
            post_params=_form_params,
            files=_files,
            response_types_map=_response_types_map,
            auth_settings=_auth_settings,
            async_req=_params.get('async_req'),
            _return_http_data_only=_params.get('_return_http_data_only'), # noqa: E501
            _preload_content=_params.get('_preload_content', True),
            _request_timeout=_params.get('_request_timeout'),
            collection_formats=_collection_formats,
            _request_auth=_params.get('_request_auth'))
| 13,290 | 39.769939 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/__init__.py | # coding: utf-8
# flake8: noqa
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
# import models into model package
from lightly.openapi_generated.swagger_client.models.active_learning_score_create_request import ActiveLearningScoreCreateRequest
from lightly.openapi_generated.swagger_client.models.active_learning_score_data import ActiveLearningScoreData
from lightly.openapi_generated.swagger_client.models.api_error_code import ApiErrorCode
from lightly.openapi_generated.swagger_client.models.api_error_response import ApiErrorResponse
from lightly.openapi_generated.swagger_client.models.async_task_data import AsyncTaskData
from lightly.openapi_generated.swagger_client.models.configuration_data import ConfigurationData
from lightly.openapi_generated.swagger_client.models.configuration_entry import ConfigurationEntry
from lightly.openapi_generated.swagger_client.models.configuration_set_request import ConfigurationSetRequest
from lightly.openapi_generated.swagger_client.models.configuration_value_data_type import ConfigurationValueDataType
from lightly.openapi_generated.swagger_client.models.create_cf_bucket_activity_request import CreateCFBucketActivityRequest
from lightly.openapi_generated.swagger_client.models.create_docker_worker_registry_entry_request import CreateDockerWorkerRegistryEntryRequest
from lightly.openapi_generated.swagger_client.models.create_entity_response import CreateEntityResponse
from lightly.openapi_generated.swagger_client.models.create_sample_with_write_urls_response import CreateSampleWithWriteUrlsResponse
from lightly.openapi_generated.swagger_client.models.create_team_membership_request import CreateTeamMembershipRequest
from lightly.openapi_generated.swagger_client.models.creator import Creator
from lightly.openapi_generated.swagger_client.models.crop_data import CropData
from lightly.openapi_generated.swagger_client.models.dataset_create_request import DatasetCreateRequest
from lightly.openapi_generated.swagger_client.models.dataset_creator import DatasetCreator
from lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData
from lightly.openapi_generated.swagger_client.models.dataset_data_enriched import DatasetDataEnriched
from lightly.openapi_generated.swagger_client.models.dataset_embedding_data import DatasetEmbeddingData
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.dataset_update_request import DatasetUpdateRequest
from lightly.openapi_generated.swagger_client.models.datasource_config import DatasourceConfig
from lightly.openapi_generated.swagger_client.models.datasource_config_azure import DatasourceConfigAzure
from lightly.openapi_generated.swagger_client.models.datasource_config_azure_all_of import DatasourceConfigAzureAllOf
from lightly.openapi_generated.swagger_client.models.datasource_config_base import DatasourceConfigBase
from lightly.openapi_generated.swagger_client.models.datasource_config_gcs import DatasourceConfigGCS
from lightly.openapi_generated.swagger_client.models.datasource_config_gcs_all_of import DatasourceConfigGCSAllOf
from lightly.openapi_generated.swagger_client.models.datasource_config_lightly import DatasourceConfigLIGHTLY
from lightly.openapi_generated.swagger_client.models.datasource_config_local import DatasourceConfigLOCAL
from lightly.openapi_generated.swagger_client.models.datasource_config_obs import DatasourceConfigOBS
from lightly.openapi_generated.swagger_client.models.datasource_config_obs_all_of import DatasourceConfigOBSAllOf
from lightly.openapi_generated.swagger_client.models.datasource_config_s3 import DatasourceConfigS3
from lightly.openapi_generated.swagger_client.models.datasource_config_s3_all_of import DatasourceConfigS3AllOf
from lightly.openapi_generated.swagger_client.models.datasource_config_s3_delegated_access import DatasourceConfigS3DelegatedAccess
from lightly.openapi_generated.swagger_client.models.datasource_config_s3_delegated_access_all_of import DatasourceConfigS3DelegatedAccessAllOf
from lightly.openapi_generated.swagger_client.models.datasource_config_verify_data import DatasourceConfigVerifyData
from lightly.openapi_generated.swagger_client.models.datasource_config_verify_data_errors import DatasourceConfigVerifyDataErrors
from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_request import DatasourceProcessedUntilTimestampRequest
from lightly.openapi_generated.swagger_client.models.datasource_processed_until_timestamp_response import DatasourceProcessedUntilTimestampResponse
from lightly.openapi_generated.swagger_client.models.datasource_purpose import DatasourcePurpose
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data import DatasourceRawSamplesData
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_data_row import DatasourceRawSamplesDataRow
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_metadata_data import DatasourceRawSamplesMetadataData
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_metadata_data_row import DatasourceRawSamplesMetadataDataRow
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data import DatasourceRawSamplesPredictionsData
from lightly.openapi_generated.swagger_client.models.datasource_raw_samples_predictions_data_row import DatasourceRawSamplesPredictionsDataRow
from lightly.openapi_generated.swagger_client.models.dimensionality_reduction_method import DimensionalityReductionMethod
from lightly.openapi_generated.swagger_client.models.docker_license_information import DockerLicenseInformation
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_create_request import DockerRunArtifactCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_created_data import DockerRunArtifactCreatedData
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_data import DockerRunArtifactData
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_storage_location import DockerRunArtifactStorageLocation
from lightly.openapi_generated.swagger_client.models.docker_run_artifact_type import DockerRunArtifactType
from lightly.openapi_generated.swagger_client.models.docker_run_create_request import DockerRunCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_data import DockerRunData
from lightly.openapi_generated.swagger_client.models.docker_run_log_data import DockerRunLogData
from lightly.openapi_generated.swagger_client.models.docker_run_log_entry_data import DockerRunLogEntryData
from lightly.openapi_generated.swagger_client.models.docker_run_log_level import DockerRunLogLevel
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_create_request import DockerRunScheduledCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_data import DockerRunScheduledData
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_priority import DockerRunScheduledPriority
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_state import DockerRunScheduledState
from lightly.openapi_generated.swagger_client.models.docker_run_scheduled_update_request import DockerRunScheduledUpdateRequest
from lightly.openapi_generated.swagger_client.models.docker_run_state import DockerRunState
from lightly.openapi_generated.swagger_client.models.docker_run_update_request import DockerRunUpdateRequest
from lightly.openapi_generated.swagger_client.models.docker_task_description import DockerTaskDescription
from lightly.openapi_generated.swagger_client.models.docker_user_stats import DockerUserStats
from lightly.openapi_generated.swagger_client.models.docker_worker_config import DockerWorkerConfig
from lightly.openapi_generated.swagger_client.models.docker_worker_config_create_request import DockerWorkerConfigCreateRequest
from lightly.openapi_generated.swagger_client.models.docker_worker_config_data import DockerWorkerConfigData
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2 import DockerWorkerConfigV2
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_create_request import DockerWorkerConfigV2CreateRequest
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_data import DockerWorkerConfigV2Data
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_docker import DockerWorkerConfigV2Docker
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_docker_object_level import DockerWorkerConfigV2DockerObjectLevel
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_docker_stopping_condition import DockerWorkerConfigV2DockerStoppingCondition
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly import DockerWorkerConfigV2Lightly
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly_collate import DockerWorkerConfigV2LightlyCollate
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly_model import DockerWorkerConfigV2LightlyModel
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v2_lightly_trainer import DockerWorkerConfigV2LightlyTrainer
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3 import DockerWorkerConfigV3
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_create_request import DockerWorkerConfigV3CreateRequest
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_data import DockerWorkerConfigV3Data
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker import DockerWorkerConfigV3Docker
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_corruptness_check import DockerWorkerConfigV3DockerCorruptnessCheck
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_datasource import DockerWorkerConfigV3DockerDatasource
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_docker_training import DockerWorkerConfigV3DockerTraining
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly import DockerWorkerConfigV3Lightly
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_checkpoint_callback import DockerWorkerConfigV3LightlyCheckpointCallback
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_collate import DockerWorkerConfigV3LightlyCollate
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_criterion import DockerWorkerConfigV3LightlyCriterion
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_loader import DockerWorkerConfigV3LightlyLoader
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_model import DockerWorkerConfigV3LightlyModel
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_optimizer import DockerWorkerConfigV3LightlyOptimizer
from lightly.openapi_generated.swagger_client.models.docker_worker_config_v3_lightly_trainer import DockerWorkerConfigV3LightlyTrainer
from lightly.openapi_generated.swagger_client.models.docker_worker_registry_entry_data import DockerWorkerRegistryEntryData
from lightly.openapi_generated.swagger_client.models.docker_worker_state import DockerWorkerState
from lightly.openapi_generated.swagger_client.models.docker_worker_type import DockerWorkerType
from lightly.openapi_generated.swagger_client.models.embedding2d_create_request import Embedding2dCreateRequest
from lightly.openapi_generated.swagger_client.models.embedding2d_data import Embedding2dData
from lightly.openapi_generated.swagger_client.models.embedding_data import EmbeddingData
from lightly.openapi_generated.swagger_client.models.file_name_format import FileNameFormat
from lightly.openapi_generated.swagger_client.models.file_output_format import FileOutputFormat
from lightly.openapi_generated.swagger_client.models.filename_and_read_url import FilenameAndReadUrl
from lightly.openapi_generated.swagger_client.models.image_type import ImageType
from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest
from lightly.openapi_generated.swagger_client.models.job_result_type import JobResultType
from lightly.openapi_generated.swagger_client.models.job_state import JobState
from lightly.openapi_generated.swagger_client.models.job_status_data import JobStatusData
from lightly.openapi_generated.swagger_client.models.job_status_data_result import JobStatusDataResult
from lightly.openapi_generated.swagger_client.models.job_status_meta import JobStatusMeta
from lightly.openapi_generated.swagger_client.models.job_status_upload_method import JobStatusUploadMethod
from lightly.openapi_generated.swagger_client.models.jobs_data import JobsData
from lightly.openapi_generated.swagger_client.models.label_box_data_row import LabelBoxDataRow
from lightly.openapi_generated.swagger_client.models.label_box_v4_data_row import LabelBoxV4DataRow
from lightly.openapi_generated.swagger_client.models.label_studio_task import LabelStudioTask
from lightly.openapi_generated.swagger_client.models.label_studio_task_data import LabelStudioTaskData
from lightly.openapi_generated.swagger_client.models.lightly_docker_selection_method import LightlyDockerSelectionMethod
from lightly.openapi_generated.swagger_client.models.lightly_model_v2 import LightlyModelV2
from lightly.openapi_generated.swagger_client.models.lightly_model_v3 import LightlyModelV3
from lightly.openapi_generated.swagger_client.models.lightly_trainer_precision_v2 import LightlyTrainerPrecisionV2
from lightly.openapi_generated.swagger_client.models.lightly_trainer_precision_v3 import LightlyTrainerPrecisionV3
from lightly.openapi_generated.swagger_client.models.prediction_singleton import PredictionSingleton
from lightly.openapi_generated.swagger_client.models.prediction_singleton_base import PredictionSingletonBase
from lightly.openapi_generated.swagger_client.models.prediction_singleton_classification import PredictionSingletonClassification
from lightly.openapi_generated.swagger_client.models.prediction_singleton_classification_all_of import PredictionSingletonClassificationAllOf
from lightly.openapi_generated.swagger_client.models.prediction_singleton_instance_segmentation import PredictionSingletonInstanceSegmentation
from lightly.openapi_generated.swagger_client.models.prediction_singleton_instance_segmentation_all_of import PredictionSingletonInstanceSegmentationAllOf
from lightly.openapi_generated.swagger_client.models.prediction_singleton_keypoint_detection import PredictionSingletonKeypointDetection
from lightly.openapi_generated.swagger_client.models.prediction_singleton_keypoint_detection_all_of import PredictionSingletonKeypointDetectionAllOf
from lightly.openapi_generated.swagger_client.models.prediction_singleton_object_detection import PredictionSingletonObjectDetection
from lightly.openapi_generated.swagger_client.models.prediction_singleton_object_detection_all_of import PredictionSingletonObjectDetectionAllOf
from lightly.openapi_generated.swagger_client.models.prediction_singleton_semantic_segmentation import PredictionSingletonSemanticSegmentation
from lightly.openapi_generated.swagger_client.models.prediction_singleton_semantic_segmentation_all_of import PredictionSingletonSemanticSegmentationAllOf
from lightly.openapi_generated.swagger_client.models.prediction_task_schema import PredictionTaskSchema
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_base import PredictionTaskSchemaBase
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_category import PredictionTaskSchemaCategory
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_category_keypoints import PredictionTaskSchemaCategoryKeypoints
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_category_keypoints_all_of import PredictionTaskSchemaCategoryKeypointsAllOf
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_keypoint import PredictionTaskSchemaKeypoint
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_keypoint_all_of import PredictionTaskSchemaKeypointAllOf
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_simple import PredictionTaskSchemaSimple
from lightly.openapi_generated.swagger_client.models.prediction_task_schema_simple_all_of import PredictionTaskSchemaSimpleAllOf
from lightly.openapi_generated.swagger_client.models.prediction_task_schemas import PredictionTaskSchemas
from lightly.openapi_generated.swagger_client.models.questionnaire_data import QuestionnaireData
from lightly.openapi_generated.swagger_client.models.s3_region import S3Region
from lightly.openapi_generated.swagger_client.models.sama_task import SamaTask
from lightly.openapi_generated.swagger_client.models.sama_task_data import SamaTaskData
from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest
from lightly.openapi_generated.swagger_client.models.sample_data import SampleData
from lightly.openapi_generated.swagger_client.models.sample_data_modes import SampleDataModes
from lightly.openapi_generated.swagger_client.models.sample_meta_data import SampleMetaData
from lightly.openapi_generated.swagger_client.models.sample_partial_mode import SamplePartialMode
from lightly.openapi_generated.swagger_client.models.sample_sort_by import SampleSortBy
from lightly.openapi_generated.swagger_client.models.sample_type import SampleType
from lightly.openapi_generated.swagger_client.models.sample_update_request import SampleUpdateRequest
from lightly.openapi_generated.swagger_client.models.sample_write_urls import SampleWriteUrls
from lightly.openapi_generated.swagger_client.models.sampling_config import SamplingConfig
from lightly.openapi_generated.swagger_client.models.sampling_config_stopping_condition import SamplingConfigStoppingCondition
from lightly.openapi_generated.swagger_client.models.sampling_create_request import SamplingCreateRequest
from lightly.openapi_generated.swagger_client.models.sampling_method import SamplingMethod
from lightly.openapi_generated.swagger_client.models.sector import Sector
from lightly.openapi_generated.swagger_client.models.selection_config import SelectionConfig
from lightly.openapi_generated.swagger_client.models.selection_config_entry import SelectionConfigEntry
from lightly.openapi_generated.swagger_client.models.selection_config_entry_input import SelectionConfigEntryInput
from lightly.openapi_generated.swagger_client.models.selection_config_entry_strategy import SelectionConfigEntryStrategy
from lightly.openapi_generated.swagger_client.models.selection_input_predictions_name import SelectionInputPredictionsName
from lightly.openapi_generated.swagger_client.models.selection_input_type import SelectionInputType
from lightly.openapi_generated.swagger_client.models.selection_strategy_threshold_operation import SelectionStrategyThresholdOperation
from lightly.openapi_generated.swagger_client.models.selection_strategy_type import SelectionStrategyType
from lightly.openapi_generated.swagger_client.models.service_account_basic_data import ServiceAccountBasicData
from lightly.openapi_generated.swagger_client.models.set_embeddings_is_processed_flag_by_id_body_request import SetEmbeddingsIsProcessedFlagByIdBodyRequest
from lightly.openapi_generated.swagger_client.models.shared_access_config_create_request import SharedAccessConfigCreateRequest
from lightly.openapi_generated.swagger_client.models.shared_access_config_data import SharedAccessConfigData
from lightly.openapi_generated.swagger_client.models.shared_access_type import SharedAccessType
from lightly.openapi_generated.swagger_client.models.tag_active_learning_scores_data import TagActiveLearningScoresData
from lightly.openapi_generated.swagger_client.models.tag_arithmetics_operation import TagArithmeticsOperation
from lightly.openapi_generated.swagger_client.models.tag_arithmetics_request import TagArithmeticsRequest
from lightly.openapi_generated.swagger_client.models.tag_arithmetics_response import TagArithmeticsResponse
from lightly.openapi_generated.swagger_client.models.tag_bit_mask_response import TagBitMaskResponse
from lightly.openapi_generated.swagger_client.models.tag_change_data import TagChangeData
from lightly.openapi_generated.swagger_client.models.tag_change_data_arithmetics import TagChangeDataArithmetics
from lightly.openapi_generated.swagger_client.models.tag_change_data_initial import TagChangeDataInitial
from lightly.openapi_generated.swagger_client.models.tag_change_data_metadata import TagChangeDataMetadata
from lightly.openapi_generated.swagger_client.models.tag_change_data_operation_method import TagChangeDataOperationMethod
from lightly.openapi_generated.swagger_client.models.tag_change_data_rename import TagChangeDataRename
from lightly.openapi_generated.swagger_client.models.tag_change_data_sampler import TagChangeDataSampler
from lightly.openapi_generated.swagger_client.models.tag_change_data_samples import TagChangeDataSamples
from lightly.openapi_generated.swagger_client.models.tag_change_data_scatterplot import TagChangeDataScatterplot
from lightly.openapi_generated.swagger_client.models.tag_change_data_upsize import TagChangeDataUpsize
from lightly.openapi_generated.swagger_client.models.tag_change_entry import TagChangeEntry
from lightly.openapi_generated.swagger_client.models.tag_create_request import TagCreateRequest
from lightly.openapi_generated.swagger_client.models.tag_creator import TagCreator
from lightly.openapi_generated.swagger_client.models.tag_data import TagData
from lightly.openapi_generated.swagger_client.models.tag_update_request import TagUpdateRequest
from lightly.openapi_generated.swagger_client.models.tag_upsize_request import TagUpsizeRequest
from lightly.openapi_generated.swagger_client.models.task_type import TaskType
from lightly.openapi_generated.swagger_client.models.team_basic_data import TeamBasicData
from lightly.openapi_generated.swagger_client.models.team_data import TeamData
from lightly.openapi_generated.swagger_client.models.team_role import TeamRole
from lightly.openapi_generated.swagger_client.models.trigger2d_embedding_job_request import Trigger2dEmbeddingJobRequest
from lightly.openapi_generated.swagger_client.models.update_docker_worker_registry_entry_request import UpdateDockerWorkerRegistryEntryRequest
from lightly.openapi_generated.swagger_client.models.update_team_membership_request import UpdateTeamMembershipRequest
from lightly.openapi_generated.swagger_client.models.user_type import UserType
from lightly.openapi_generated.swagger_client.models.video_frame_data import VideoFrameData
from lightly.openapi_generated.swagger_client.models.write_csv_url_data import WriteCSVUrlData
| 23,629 | 103.557522 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/active_learning_score_create_request.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Union
from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, conlist, constr, validator
class ActiveLearningScoreCreateRequest(BaseModel):
    """Request payload for uploading active-learning scores.

    Carries a score-type identifier plus the per-sample score values.
    Field declarations and the regex validator are part of the wire
    contract and must match the OpenAPI spec exactly.
    """
    score_type: constr(strict=True, min_length=1) = Field(..., alias="scoreType", description="Type of active learning score")
    scores: conlist(Union[StrictFloat, StrictInt], min_items=1) = Field(..., description="Array of active learning scores")
    __properties = ["scoreType", "scores"]

    @validator('score_type')
    def score_type_validate_regular_expression(cls, value):
        """Reject score types with characters outside the allowed set."""
        if re.match(r"^[a-zA-Z0-9_+=,.@:\/-]*$", value) is None:
            raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9_+=,.@:\/-]*$/")
        return value

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Return a pretty-printed string form of the model."""
        as_dict = self.dict(by_alias=by_alias)
        return pprint.pformat(as_dict)

    def to_json(self, by_alias: bool = False) -> str:
        """Serialize the model to a JSON string."""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> ActiveLearningScoreCreateRequest:
        """Build an instance of ActiveLearningScoreCreateRequest from a JSON string."""
        parsed = json.loads(json_str)
        return cls.from_dict(parsed)

    def to_dict(self, by_alias: bool = False):
        """Return the dictionary representation of the model."""
        return self.dict(by_alias=by_alias,
                         exclude={
                         },
                         exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> ActiveLearningScoreCreateRequest:
        """Build an instance of ActiveLearningScoreCreateRequest from a dict,
        rejecting any keys that are not declared properties."""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return ActiveLearningScoreCreateRequest.parse_obj(obj)

        # refuse additional fields that are not part of the schema
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in ActiveLearningScoreCreateRequest) in the input: " + str(obj))

        return ActiveLearningScoreCreateRequest.parse_obj({
            "score_type": obj.get("scoreType"),
            "scores": obj.get("scores")
        })
| 3,231 | 35.727273 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/active_learning_score_data.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Union
from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, conint, conlist, constr, validator
class ActiveLearningScoreData(BaseModel):
    """Stored active-learning score record for a tag.

    Holds the MongoDB ids, the score type, the per-sample scores, and a
    creation timestamp. Field declarations and the regex validators are
    part of the wire contract and must match the OpenAPI spec exactly.
    """
    id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
    tag_id: constr(strict=True) = Field(..., alias="tagId", description="MongoDB ObjectId")
    score_type: constr(strict=True, min_length=1) = Field(..., alias="scoreType", description="Type of active learning score")
    scores: conlist(Union[StrictFloat, StrictInt], min_items=1) = Field(..., description="Array of active learning scores")
    created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds")
    __properties = ["id", "tagId", "scoreType", "scores", "createdAt"]

    @validator('id')
    def id_validate_regular_expression(cls, value):
        """Require a 24-character lowercase-hex MongoDB ObjectId."""
        if re.match(r"^[a-f0-9]{24}$", value) is None:
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    @validator('tag_id')
    def tag_id_validate_regular_expression(cls, value):
        """Require a 24-character lowercase-hex MongoDB ObjectId."""
        if re.match(r"^[a-f0-9]{24}$", value) is None:
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    @validator('score_type')
    def score_type_validate_regular_expression(cls, value):
        """Reject score types with characters outside the allowed set."""
        if re.match(r"^[a-zA-Z0-9_+=,.@:\/-]*$", value) is None:
            raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9_+=,.@:\/-]*$/")
        return value

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Return a pretty-printed string form of the model."""
        as_dict = self.dict(by_alias=by_alias)
        return pprint.pformat(as_dict)

    def to_json(self, by_alias: bool = False) -> str:
        """Serialize the model to a JSON string."""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> ActiveLearningScoreData:
        """Build an instance of ActiveLearningScoreData from a JSON string."""
        parsed = json.loads(json_str)
        return cls.from_dict(parsed)

    def to_dict(self, by_alias: bool = False):
        """Return the dictionary representation of the model."""
        return self.dict(by_alias=by_alias,
                         exclude={
                         },
                         exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> ActiveLearningScoreData:
        """Build an instance of ActiveLearningScoreData from a dict,
        rejecting any keys that are not declared properties."""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return ActiveLearningScoreData.parse_obj(obj)

        # refuse additional fields that are not part of the schema
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in ActiveLearningScoreData) in the input: " + str(obj))

        return ActiveLearningScoreData.parse_obj({
            "id": obj.get("id"),
            "tag_id": obj.get("tagId"),
            "score_type": obj.get("scoreType"),
            "scores": obj.get("scores"),
            "created_at": obj.get("createdAt")
        })
| 4,160 | 37.527778 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/annotation_data.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic import Extra, BaseModel, Field, StrictStr, conint
from lightly.openapi_generated.swagger_client.models.annotation_meta_data import AnnotationMetaData
from lightly.openapi_generated.swagger_client.models.annotation_offer_data import AnnotationOfferData
from lightly.openapi_generated.swagger_client.models.annotation_state import AnnotationState
class AnnotationData(BaseModel):
    """Annotation job record tying a tag to an annotation partner.

    Tracks ids, lifecycle state, timestamps, free-form metadata and an
    optional partner offer. Field declarations are part of the wire
    contract and must match the OpenAPI spec exactly.
    """
    id: StrictStr = Field(..., alias="_id")
    state: AnnotationState = Field(...)
    dataset_id: StrictStr = Field(..., alias="datasetId")
    tag_id: StrictStr = Field(..., alias="tagId")
    partner_id: Optional[StrictStr] = Field(None, alias="partnerId")
    created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds")
    last_modified_at: conint(strict=True, ge=0) = Field(..., alias="lastModifiedAt", description="unix timestamp in milliseconds")
    meta: AnnotationMetaData = Field(...)
    offer: Optional[AnnotationOfferData] = None
    __properties = ["_id", "state", "datasetId", "tagId", "partnerId", "createdAt", "lastModifiedAt", "meta", "offer"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> AnnotationData:
        """Create an instance of AnnotationData from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # Re-serialize nested models through their own to_dict() so aliasing
        # is applied recursively. The 'meta'/'offer' field names equal their
        # aliases, so the key is the same whether or not by_alias is set
        # (the generated code spelled this as a no-op ternary).
        if self.meta:
            _dict['meta'] = self.meta.to_dict(by_alias=by_alias)
        if self.offer:
            _dict['offer'] = self.offer.to_dict(by_alias=by_alias)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> AnnotationData:
        """Create an instance of AnnotationData from a dict, rejecting any
        keys that are not declared properties."""
        if obj is None:
            return None

        if not isinstance(obj, dict):
            return AnnotationData.parse_obj(obj)

        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in AnnotationData) in the input: " + str(obj))

        _obj = AnnotationData.parse_obj({
            "id": obj.get("_id"),
            "state": obj.get("state"),
            "dataset_id": obj.get("datasetId"),
            "tag_id": obj.get("tagId"),
            "partner_id": obj.get("partnerId"),
            "created_at": obj.get("createdAt"),
            "last_modified_at": obj.get("lastModifiedAt"),
            # nested models are parsed through their own from_dict
            "meta": AnnotationMetaData.from_dict(obj.get("meta")) if obj.get("meta") is not None else None,
            "offer": AnnotationOfferData.from_dict(obj.get("offer")) if obj.get("offer") is not None else None
        })
        return _obj
| 4,279 | 40.153846 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/annotation_meta_data.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic import Extra, BaseModel, StrictStr
class AnnotationMetaData(BaseModel):
    """Free-form metadata attached to an annotation job.

    Currently only an optional textual description. The field declaration
    is part of the wire contract and must match the OpenAPI spec exactly.
    """
    description: Optional[StrictStr] = None
    __properties = ["description"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Return a pretty-printed string form of the model."""
        as_dict = self.dict(by_alias=by_alias)
        return pprint.pformat(as_dict)

    def to_json(self, by_alias: bool = False) -> str:
        """Serialize the model to a JSON string."""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> AnnotationMetaData:
        """Build an instance of AnnotationMetaData from a JSON string."""
        parsed = json.loads(json_str)
        return cls.from_dict(parsed)

    def to_dict(self, by_alias: bool = False):
        """Return the dictionary representation of the model."""
        return self.dict(by_alias=by_alias,
                         exclude={
                         },
                         exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> AnnotationMetaData:
        """Build an instance of AnnotationMetaData from a dict,
        rejecting any keys that are not declared properties."""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return AnnotationMetaData.parse_obj(obj)

        # refuse additional fields that are not part of the schema
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in AnnotationMetaData) in the input: " + str(obj))

        return AnnotationMetaData.parse_obj({
            "description": obj.get("description")
        })
| 2,485 | 30.468354 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/annotation_offer_data.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional, Union
from pydantic import Extra, BaseModel, Field, StrictFloat, StrictInt, conint
class AnnotationOfferData(BaseModel):
    """Commercial offer attached to an annotation: its cost and, once
    finished, the completion timestamp.

    Auto-generated from the Lightly OpenAPI spec.
    """
    cost: Optional[Union[StrictFloat, StrictInt]] = None
    # Wire format uses camelCase "completedBy"; value is unix ms (>= 0).
    completed_by: Optional[conint(strict=True, ge=0)] = Field(None, alias="completedBy", description="unix timestamp in milliseconds")
    # Accepted wire-format keys; used by from_dict to reject unknown fields.
    __properties = ["cost", "completedBy"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # reject any field not declared on the model
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> AnnotationOfferData:
        """Create an instance of AnnotationOfferData from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model.

        None-valued fields are dropped (``exclude_none=True``).
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> AnnotationOfferData:
        """Create an instance of AnnotationOfferData from a dict.

        Returns None for None input; raises ValueError on unknown keys.
        """
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return AnnotationOfferData.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in AnnotationOfferData) in the input: " + str(obj))
        _obj = AnnotationOfferData.parse_obj({
            "cost": obj.get("cost"),
            "completed_by": obj.get("completedBy")
        })
        return _obj
| 2,723 | 32.62963 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/annotation_state.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import json
import pprint
import re # noqa: F401
from enum import Enum
from aenum import no_arg # type: ignore
class AnnotationState(str, Enum):
    """Lifecycle state of an annotation task.

    Subclassing ``str`` makes members compare equal to their raw string
    values and lets them serialize as plain strings.
    """
    # allowed enum values
    DRAFT = 'DRAFT'
    OFFER_REQUESTED = 'OFFER_REQUESTED'
    OFFER_RETURNED = 'OFFER_RETURNED'
    ACCEPTED = 'ACCEPTED'
    ACTIVE = 'ACTIVE'
    COMPLETED = 'COMPLETED'

    @classmethod
    def from_json(cls, json_str: str) -> 'AnnotationState':
        """Create an instance of AnnotationState from a JSON string.

        Args:
            json_str: A JSON-encoded string, e.g. ``'"DRAFT"'``.

        Raises:
            ValueError: If the decoded value is not a valid member.
        """
        # Use cls rather than the hard-coded class name so the
        # classmethod also works for subclasses.
        return cls(json.loads(json_str))
| 1,057 | 21.510638 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/api_error_code.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import json
import pprint
import re # noqa: F401
from enum import Enum
from aenum import no_arg # type: ignore
class ApiErrorCode(str, Enum):
    """Machine-readable error codes returned by the Lightly API.

    Subclassing ``str`` makes members compare equal to their raw string
    values and lets them serialize as plain strings. Note that
    ``EMBEDDING_2_D_UNKNOWN`` is the only member whose value
    ('EMBEDDING_2D_UNKNOWN') differs from its Python name.
    """
    # allowed enum values
    BAD_REQUEST = 'BAD_REQUEST'
    NOT_IMPLEMENTED = 'NOT_IMPLEMENTED'
    FORBIDDEN = 'FORBIDDEN'
    UNAUTHORIZED = 'UNAUTHORIZED'
    NOT_FOUND = 'NOT_FOUND'
    NOT_MODIFIED = 'NOT_MODIFIED'
    MALFORMED_REQUEST = 'MALFORMED_REQUEST'
    MALFORMED_RESPONSE = 'MALFORMED_RESPONSE'
    PAYLOAD_TOO_LARGE = 'PAYLOAD_TOO_LARGE'
    JWT_INVALID = 'JWT_INVALID'
    JWT_MALFORMED = 'JWT_MALFORMED'
    CREATION_FAILED = 'CREATION_FAILED'
    JOB_CREATION_FAILED = 'JOB_CREATION_FAILED'
    JOB_UNKNOWN = 'JOB_UNKNOWN'
    USER_NOT_KNOWN = 'USER_NOT_KNOWN'
    USER_ACCOUNT_DEACTIVATED = 'USER_ACCOUNT_DEACTIVATED'
    USER_ACCOUNT_BLOCKED = 'USER_ACCOUNT_BLOCKED'
    TEAM_ACCOUNT_PLAN_INSUFFICIENT = 'TEAM_ACCOUNT_PLAN_INSUFFICIENT'
    ILLEGAL_ACTION_RESOURCE_IN_USE = 'ILLEGAL_ACTION_RESOURCE_IN_USE'
    DATASET_UNKNOWN = 'DATASET_UNKNOWN'
    DATASET_NOT_SUPPORTED = 'DATASET_NOT_SUPPORTED'
    DATASET_TAG_INVALID = 'DATASET_TAG_INVALID'
    DATASET_NAME_EXISTS = 'DATASET_NAME_EXISTS'
    DATASET_AT_MAX_CAPACITY = 'DATASET_AT_MAX_CAPACITY'
    DATASET_DATASOURCE_UNKNOWN = 'DATASET_DATASOURCE_UNKNOWN'
    DATASET_DATASOURCE_CREDENTIALS_ERROR = 'DATASET_DATASOURCE_CREDENTIALS_ERROR'
    DATASET_DATASOURCE_INVALID = 'DATASET_DATASOURCE_INVALID'
    DATASET_DATASOURCE_ACTION_NOT_IMPLEMENTED = 'DATASET_DATASOURCE_ACTION_NOT_IMPLEMENTED'
    DATASET_DATASOURCE_ILLEGAL_ACTION = 'DATASET_DATASOURCE_ILLEGAL_ACTION'
    DATASET_DATASOURCE_FILE_TOO_LARGE = 'DATASET_DATASOURCE_FILE_TOO_LARGE'
    DATASET_DATASOURCE_RELEVANT_FILENAMES_INVALID = 'DATASET_DATASOURCE_RELEVANT_FILENAMES_INVALID'
    ACCESS_CONTROL_UNKNOWN = 'ACCESS_CONTROL_UNKNOWN'
    EMBEDDING_UNKNOWN = 'EMBEDDING_UNKNOWN'
    EMBEDDING_NAME_EXISTS = 'EMBEDDING_NAME_EXISTS'
    EMBEDDING_INVALID = 'EMBEDDING_INVALID'
    EMBEDDING_NOT_READY = 'EMBEDDING_NOT_READY'
    EMBEDDING_ROW_COUNT_UNKNOWN = 'EMBEDDING_ROW_COUNT_UNKNOWN'
    EMBEDDING_ROW_COUNT_INVALID = 'EMBEDDING_ROW_COUNT_INVALID'
    EMBEDDING_2_D_UNKNOWN = 'EMBEDDING_2D_UNKNOWN'
    TAG_UNKNOWN = 'TAG_UNKNOWN'
    TAG_NAME_EXISTS = 'TAG_NAME_EXISTS'
    TAG_INITIAL_EXISTS = 'TAG_INITIAL_EXISTS'
    TAG_UNDELETABLE_NOT_A_LEAF = 'TAG_UNDELETABLE_NOT_A_LEAF'
    TAG_UNDELETABLE_IS_INITIAL = 'TAG_UNDELETABLE_IS_INITIAL'
    TAG_NO_TAG_IN_DATASET = 'TAG_NO_TAG_IN_DATASET'
    TAG_PREVTAG_NOT_IN_DATASET = 'TAG_PREVTAG_NOT_IN_DATASET'
    TAG_QUERYTAG_NOT_IN_DATASET = 'TAG_QUERYTAG_NOT_IN_DATASET'
    TAG_PRESELECTEDTAG_NOT_IN_DATASET = 'TAG_PRESELECTEDTAG_NOT_IN_DATASET'
    TAG_NO_SCORES_AVAILABLE = 'TAG_NO_SCORES_AVAILABLE'
    SAMPLE_UNKNOWN = 'SAMPLE_UNKNOWN'
    SAMPLE_THUMBNAME_UNKNOWN = 'SAMPLE_THUMBNAME_UNKNOWN'
    SAMPLE_CREATE_REQUEST_INVALID_FORMAT = 'SAMPLE_CREATE_REQUEST_INVALID_FORMAT'
    SAMPLE_CREATE_REQUEST_INVALID_CROP_DATA = 'SAMPLE_CREATE_REQUEST_INVALID_CROP_DATA'
    PREDICTION_TASK_SCHEMA_UNKNOWN = 'PREDICTION_TASK_SCHEMA_UNKNOWN'
    PREDICTION_TASK_SCHEMA_CATEGORIES_NOT_UNIQUE = 'PREDICTION_TASK_SCHEMA_CATEGORIES_NOT_UNIQUE'
    SCORE_UNKNOWN = 'SCORE_UNKNOWN'
    SCORES_CANNOT_BE_SET = 'SCORES_CANNOT_BE_SET'
    DOCKER_RUN_UNKNOWN = 'DOCKER_RUN_UNKNOWN'
    DOCKER_RUN_DATASET_UNAVAILABLE = 'DOCKER_RUN_DATASET_UNAVAILABLE'
    DOCKER_RUN_REPORT_UNAVAILABLE = 'DOCKER_RUN_REPORT_UNAVAILABLE'
    DOCKER_RUN_ARTIFACT_UNKNOWN = 'DOCKER_RUN_ARTIFACT_UNKNOWN'
    DOCKER_RUN_ARTIFACT_EXISTS = 'DOCKER_RUN_ARTIFACT_EXISTS'
    DOCKER_RUN_ARTIFACT_UNAVAILABLE = 'DOCKER_RUN_ARTIFACT_UNAVAILABLE'
    DOCKER_WORKER_UNKNOWN = 'DOCKER_WORKER_UNKNOWN'
    DOCKER_WORKER_CONFIG_UNKNOWN = 'DOCKER_WORKER_CONFIG_UNKNOWN'
    DOCKER_WORKER_CONFIG_NOT_COMPATIBLE_WITH_DATASOURCE = 'DOCKER_WORKER_CONFIG_NOT_COMPATIBLE_WITH_DATASOURCE'
    DOCKER_WORKER_CONFIG_REFERENCES_INVALID_FILES = 'DOCKER_WORKER_CONFIG_REFERENCES_INVALID_FILES'
    DOCKER_WORKER_CONFIG_IN_USE = 'DOCKER_WORKER_CONFIG_IN_USE'
    DOCKER_WORKER_CONFIG_INVALID = 'DOCKER_WORKER_CONFIG_INVALID'
    DOCKER_WORKER_SCHEDULE_UNKNOWN = 'DOCKER_WORKER_SCHEDULE_UNKNOWN'
    DOCKER_WORKER_SCHEDULE_UPDATE_FAILED = 'DOCKER_WORKER_SCHEDULE_UPDATE_FAILED'
    METADATA_CONFIGURATION_UNKNOWN = 'METADATA_CONFIGURATION_UNKNOWN'
    CUSTOM_METADATA_AT_MAX_SIZE = 'CUSTOM_METADATA_AT_MAX_SIZE'
    ACCOUNT_SUBSCRIPTION_INSUFFICIENT = 'ACCOUNT_SUBSCRIPTION_INSUFFICIENT'
    TEAM_UNKNOWN = 'TEAM_UNKNOWN'

    @classmethod
    def from_json(cls, json_str: str) -> 'ApiErrorCode':
        """Create an instance of ApiErrorCode from a JSON string.

        Args:
            json_str: A JSON-encoded string, e.g. ``'"NOT_FOUND"'``.

        Raises:
            ValueError: If the decoded value is not a valid member.
        """
        # Use cls rather than the hard-coded class name so the
        # classmethod also works for subclasses.
        return cls(json.loads(json_str))
| 5,197 | 43.810345 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/api_error_response.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Optional
from pydantic import Extra, BaseModel, Field, StrictStr, conlist
from lightly.openapi_generated.swagger_client.models.api_error_code import ApiErrorCode
class ApiErrorResponse(BaseModel):
    """Error payload returned by the Lightly API: a machine-readable code,
    a human-readable message, and optional debugging context.

    Auto-generated from the Lightly OpenAPI spec.
    """
    code: ApiErrorCode = Field(...)
    error: StrictStr = Field(..., description="The detailed error message or code of the error")
    request_id: Optional[StrictStr] = Field(None, alias="requestId", description="The identifier of a request. Helpful for debugging")
    error_labels: Optional[conlist(StrictStr)] = Field(None, alias="errorLabels", description="Can occur on database errors")
    # Accepted wire-format keys; used by from_dict to reject unknown fields.
    __properties = ["code", "error", "requestId", "errorLabels"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # reject any field not declared on the model
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> ApiErrorResponse:
        """Create an instance of ApiErrorResponse from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model.

        None-valued fields are dropped (``exclude_none=True``).
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> ApiErrorResponse:
        """Create an instance of ApiErrorResponse from a dict.

        Returns None for None input; raises ValueError on unknown keys.
        """
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return ApiErrorResponse.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in ApiErrorResponse) in the input: " + str(obj))
        _obj = ApiErrorResponse.parse_obj({
            "code": obj.get("code"),
            "error": obj.get("error"),
            "request_id": obj.get("requestId"),
            "error_labels": obj.get("errorLabels")
        })
        return _obj
| 3,082 | 34.848837 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/async_task_data.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from pydantic import Extra, BaseModel, Field, StrictStr
class AsyncTaskData(BaseModel):
    """Handle for an asynchronously scheduled job: carries only the job id
    used to poll its status.

    Auto-generated from the Lightly OpenAPI spec.
    """
    job_id: StrictStr = Field(..., alias="jobId")
    # Accepted wire-format keys; used by from_dict to reject unknown fields.
    __properties = ["jobId"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # reject any field not declared on the model
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> AsyncTaskData:
        """Create an instance of AsyncTaskData from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model.

        None-valued fields are dropped (``exclude_none=True``).
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> AsyncTaskData:
        """Create an instance of AsyncTaskData from a dict.

        Returns None for None input; raises ValueError on unknown keys.
        """
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return AsyncTaskData.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in AsyncTaskData) in the input: " + str(obj))
        _obj = AsyncTaskData.parse_obj({
            "job_id": obj.get("jobId")
        })
        return _obj
| 2,409 | 29.506329 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/auth0_on_sign_up_request.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from pydantic import Extra, BaseModel, Field
from lightly.openapi_generated.swagger_client.models.auth0_on_sign_up_request_user import Auth0OnSignUpRequestUser
class Auth0OnSignUpRequest(BaseModel):
    """Payload delivered by the Auth0 sign-up hook; wraps the newly created
    user record.

    Auto-generated from the Lightly OpenAPI spec.
    """
    user: Auth0OnSignUpRequestUser = Field(...)
    # Accepted wire-format keys; used by from_dict to reject unknown fields.
    __properties = ["user"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # reject any field not declared on the model
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> Auth0OnSignUpRequest:
        """Create an instance of Auth0OnSignUpRequest from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model.

        None-valued fields are dropped (``exclude_none=True``).
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic by calling `to_dict()` of user
        # (the ternary is generator boilerplate: alias and field name are both 'user')
        if self.user:
            _dict['user' if by_alias else 'user'] = self.user.to_dict(by_alias=by_alias)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> Auth0OnSignUpRequest:
        """Create an instance of Auth0OnSignUpRequest from a dict.

        Returns None for None input; raises ValueError on unknown keys.
        The nested user dict is parsed via Auth0OnSignUpRequestUser.from_dict.
        """
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return Auth0OnSignUpRequest.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in Auth0OnSignUpRequest) in the input: " + str(obj))
        _obj = Auth0OnSignUpRequest.parse_obj({
            "user": Auth0OnSignUpRequestUser.from_dict(obj.get("user")) if obj.get("user") is not None else None
        })
        return _obj
| 2,841 | 33.240964 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/auth0_on_sign_up_request_user.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic import Extra, BaseModel, Field, StrictStr
class Auth0OnSignUpRequestUser(BaseModel):
    """User profile fields forwarded by the Auth0 sign-up hook; only the
    Auth0 user id is mandatory.

    Auto-generated from the Lightly OpenAPI spec.
    """
    user_id: StrictStr = Field(..., alias="userId")
    email: Optional[StrictStr] = None
    locale: Optional[StrictStr] = None
    nickname: Optional[StrictStr] = None
    name: Optional[StrictStr] = None
    given_name: Optional[StrictStr] = Field(None, alias="givenName")
    family_name: Optional[StrictStr] = Field(None, alias="familyName")
    # Accepted wire-format keys; used by from_dict to reject unknown fields.
    __properties = ["userId", "email", "locale", "nickname", "name", "givenName", "familyName"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # reject any field not declared on the model
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> Auth0OnSignUpRequestUser:
        """Create an instance of Auth0OnSignUpRequestUser from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model.

        None-valued fields are dropped (``exclude_none=True``).
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> Auth0OnSignUpRequestUser:
        """Create an instance of Auth0OnSignUpRequestUser from a dict.

        Returns None for None input; raises ValueError on unknown keys.
        camelCase wire keys are mapped onto snake_case fields.
        """
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return Auth0OnSignUpRequestUser.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in Auth0OnSignUpRequestUser) in the input: " + str(obj))
        _obj = Auth0OnSignUpRequestUser.parse_obj({
            "user_id": obj.get("userId"),
            "email": obj.get("email"),
            "locale": obj.get("locale"),
            "nickname": obj.get("nickname"),
            "name": obj.get("name"),
            "given_name": obj.get("givenName"),
            "family_name": obj.get("familyName")
        })
        return _obj
| 3,161 | 33.747253 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/configuration_data.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List
from pydantic import Extra, BaseModel, Field, StrictStr, conint, conlist, constr, validator
from lightly.openapi_generated.swagger_client.models.configuration_entry import ConfigurationEntry
class ConfigurationData(BaseModel):
    """A stored metadata configuration: a named, timestamped list of
    ConfigurationEntry items identified by a MongoDB ObjectId.

    Auto-generated from the Lightly OpenAPI spec.
    """
    id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
    name: StrictStr = Field(...)
    configs: conlist(ConfigurationEntry) = Field(...)
    created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds")
    last_modified_at: conint(strict=True, ge=0) = Field(..., alias="lastModifiedAt", description="unix timestamp in milliseconds")
    # Accepted wire-format keys; used by from_dict to reject unknown fields.
    __properties = ["id", "name", "configs", "createdAt", "lastModifiedAt"]

    @validator('id')
    def id_validate_regular_expression(cls, value):
        """Validates the regular expression.

        Enforces the 24-hex-character MongoDB ObjectId format on `id`.
        """
        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # reject any field not declared on the model
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> ConfigurationData:
        """Create an instance of ConfigurationData from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model.

        None-valued fields are dropped (``exclude_none=True``); nested
        entries are serialized via their own ``to_dict``.
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic by calling `to_dict()` of each item in configs (list)
        _items = []
        if self.configs:
            for _item in self.configs:
                if _item:
                    _items.append(_item.to_dict(by_alias=by_alias))
            _dict['configs' if by_alias else 'configs'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> ConfigurationData:
        """Create an instance of ConfigurationData from a dict.

        Returns None for None input; raises ValueError on unknown keys.
        Each configs item is parsed via ConfigurationEntry.from_dict.
        """
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return ConfigurationData.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in ConfigurationData) in the input: " + str(obj))
        _obj = ConfigurationData.parse_obj({
            "id": obj.get("id"),
            "name": obj.get("name"),
            "configs": [ConfigurationEntry.from_dict(_item) for _item in obj.get("configs")] if obj.get("configs") is not None else None,
            "created_at": obj.get("createdAt"),
            "last_modified_at": obj.get("lastModifiedAt")
        })
        return _obj
| 3,915 | 37.392157 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/configuration_entry.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Any, Optional
from pydantic import Extra, BaseModel, Field, constr
from lightly.openapi_generated.swagger_client.models.configuration_value_data_type import ConfigurationValueDataType
class ConfigurationEntry(BaseModel):
    """One metadata-extraction rule: a display name, a dot-notation path
    into a sample's customMetadata, a fallback value, and the value's
    declared data type.

    Auto-generated from the Lightly OpenAPI spec.
    """
    name: constr(strict=True, min_length=1) = Field(..., description="the name of this entry which is displayed in the UI")
    path: constr(strict=True, min_length=1) = Field(..., description="the path is the dotnotation which is used to easily access the customMetadata JSON structure of a sample e.g myArray[0].myObject.field")
    # Required but nullable: the key must be present, the value may be None.
    default_value: Optional[Any] = Field(..., alias="defaultValue", description="the default value used if its not possible to extract the value using the path or if the value extracted is nullish")
    value_data_type: ConfigurationValueDataType = Field(..., alias="valueDataType")
    # Accepted wire-format keys; used by from_dict to reject unknown fields.
    __properties = ["name", "path", "defaultValue", "valueDataType"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # reject any field not declared on the model
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> ConfigurationEntry:
        """Create an instance of ConfigurationEntry from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model.

        ``exclude_none=True`` would silently drop an explicitly-set None
        default_value, so it is re-inserted below to keep the key on the wire.
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # set to None if default_value (nullable) is None
        # and __fields_set__ contains the field
        if self.default_value is None and "default_value" in self.__fields_set__:
            _dict['defaultValue' if by_alias else 'default_value'] = None
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> ConfigurationEntry:
        """Create an instance of ConfigurationEntry from a dict.

        Returns None for None input; raises ValueError on unknown keys.
        """
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return ConfigurationEntry.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in ConfigurationEntry) in the input: " + str(obj))
        _obj = ConfigurationEntry.parse_obj({
            "name": obj.get("name"),
            "path": obj.get("path"),
            "default_value": obj.get("defaultValue"),
            "value_data_type": obj.get("valueDataType")
        })
        return _obj
| 3,612 | 38.703297 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/configuration_set_request.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List
from pydantic import Extra, BaseModel, Field, StrictStr, conlist
from lightly.openapi_generated.swagger_client.models.configuration_entry import ConfigurationEntry
class ConfigurationSetRequest(BaseModel):
    """Request body for creating/updating a metadata configuration: a name
    plus its list of ConfigurationEntry items.

    Auto-generated from the Lightly OpenAPI spec.
    """
    name: StrictStr = Field(...)
    configs: conlist(ConfigurationEntry) = Field(...)
    # Accepted wire-format keys; used by from_dict to reject unknown fields.
    __properties = ["name", "configs"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # reject any field not declared on the model
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> ConfigurationSetRequest:
        """Create an instance of ConfigurationSetRequest from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model.

        None-valued fields are dropped (``exclude_none=True``); nested
        entries are serialized via their own ``to_dict``.
        """
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic by calling `to_dict()` of each item in configs (list)
        _items = []
        if self.configs:
            for _item in self.configs:
                if _item:
                    _items.append(_item.to_dict(by_alias=by_alias))
            _dict['configs' if by_alias else 'configs'] = _items
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> ConfigurationSetRequest:
        """Create an instance of ConfigurationSetRequest from a dict.

        Returns None for None input; raises ValueError on unknown keys.
        Each configs item is parsed via ConfigurationEntry.from_dict.
        """
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return ConfigurationSetRequest.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in ConfigurationSetRequest) in the input: " + str(obj))
        _obj = ConfigurationSetRequest.parse_obj({
            "name": obj.get("name"),
            "configs": [ConfigurationEntry.from_dict(_item) for _item in obj.get("configs")] if obj.get("configs") is not None else None
        })
        return _obj
| 3,161 | 34.52809 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/configuration_value_data_type.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import json
import pprint
import re # noqa: F401
from enum import Enum
from aenum import no_arg # type: ignore
class ConfigurationValueDataType(str, Enum):
    """How Lightly should interpret a value extracted from custom metadata.

    - NUMERIC_*: values lie in a range with lower/upper bounds (e.g. color
      ranges).
    - CATEGORICAL_*: values are distinct and groupable, enabling per-value
      distributions and color mappings (string for classes/categories, int
      for ratings, boolean for flags, datetime/timestamp for time grouping).
    - OTHER_STRING: a value worth displaying in the sample detail but not
      fitting the other categories (e.g. a license string).

    Subclassing ``str`` makes members compare equal to their raw string
    values and lets them serialize as plain strings.
    """
    # allowed enum values
    NUMERIC_INT = 'NUMERIC_INT'
    NUMERIC_FLOAT = 'NUMERIC_FLOAT'
    CATEGORICAL_STRING = 'CATEGORICAL_STRING'
    CATEGORICAL_INT = 'CATEGORICAL_INT'
    CATEGORICAL_BOOLEAN = 'CATEGORICAL_BOOLEAN'
    CATEGORICAL_DATETIME = 'CATEGORICAL_DATETIME'
    CATEGORICAL_TIMESTAMP = 'CATEGORICAL_TIMESTAMP'
    OTHER_STRING = 'OTHER_STRING'

    @classmethod
    def from_json(cls, json_str: str) -> 'ConfigurationValueDataType':
        """Create an instance of ConfigurationValueDataType from a JSON string.

        Args:
            json_str: A JSON-encoded string, e.g. ``'"NUMERIC_INT"'``.

        Raises:
            ValueError: If the decoded value is not a valid member.
        """
        # Use cls rather than the hard-coded class name so the
        # classmethod also works for subclasses.
        return cls(json.loads(json_str))
| 2,155 | 43 | 909 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/create_cf_bucket_activity_request.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from pydantic import Extra, BaseModel, Field, StrictStr
class CreateCFBucketActivityRequest(BaseModel):
    """Request payload naming a CloudFront bucket activity.

    Generated from the Lightly OpenAPI spec; both fields are required.
    """
    name: StrictStr = Field(...)
    bucket: StrictStr = Field(...)
    # Keys accepted by from_dict; anything else raises a ValueError.
    __properties = ["name", "bucket"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Return a pretty-printed string of the model's fields."""
        as_dict = self.dict(by_alias=by_alias)
        return pprint.pformat(as_dict)

    def to_json(self, by_alias: bool = False) -> str:
        """Serialize the model to a JSON string."""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> CreateCFBucketActivityRequest:
        """Create an instance of CreateCFBucketActivityRequest from a JSON string"""
        parsed = json.loads(json_str)
        return cls.from_dict(parsed)

    def to_dict(self, by_alias: bool = False):
        """Return the dict representation, omitting None-valued entries."""
        return self.dict(by_alias=by_alias, exclude={}, exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> CreateCFBucketActivityRequest:
        """Create an instance of CreateCFBucketActivityRequest from a dict"""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return CreateCFBucketActivityRequest.parse_obj(obj)
        # Unknown keys are an error rather than being silently dropped.
        if any(key not in cls.__properties for key in obj):
            raise ValueError("Error due to additional fields (not defined in CreateCFBucketActivityRequest) in the input: " + str(obj))
        return CreateCFBucketActivityRequest.parse_obj({
            "name": obj.get("name"),
            "bucket": obj.get("bucket")
        })
| 2,618 | 31.333333 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/create_docker_worker_registry_entry_request.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Optional
from pydantic import Extra, BaseModel, Field, StrictStr, conlist, constr, validator
from lightly.openapi_generated.swagger_client.models.creator import Creator
from lightly.openapi_generated.swagger_client.models.docker_worker_type import DockerWorkerType
class CreateDockerWorkerRegistryEntryRequest(BaseModel):
    """Request model for registering a docker worker.

    Generated from the Lightly OpenAPI spec; do not edit manually.
    """
    name: constr(strict=True, min_length=3) = Field(...)
    worker_type: DockerWorkerType = Field(..., alias="workerType")
    labels: Optional[conlist(StrictStr)] = Field(None, description="The labels used for specifying the run-worker-relationship")
    creator: Optional[Creator] = None
    docker_version: Optional[StrictStr] = Field(None, alias="dockerVersion")
    # Aliased (camelCase) key names accepted by from_dict; anything else raises.
    __properties = ["name", "workerType", "labels", "creator", "dockerVersion"]

    @validator('name')
    def name_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # Name must start alphanumeric; later chars may also be space, '_' or '-'.
        if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 _-]+$", value):
            raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 _-]+$/")
        return value

    class Config:
        """Pydantic configuration"""
        # Accept both python field names and their camelCase aliases on input.
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # Reject unknown fields instead of silently ignoring them.
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> CreateDockerWorkerRegistryEntryRequest:
        """Create an instance of CreateDockerWorkerRegistryEntryRequest from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        # exclude_none drops optional fields that were never set.
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> CreateDockerWorkerRegistryEntryRequest:
        """Create an instance of CreateDockerWorkerRegistryEntryRequest from a dict"""
        # NOTE(review): returns None for None input despite the non-Optional annotation.
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return CreateDockerWorkerRegistryEntryRequest.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in CreateDockerWorkerRegistryEntryRequest) in the input: " + str(obj))
        _obj = CreateDockerWorkerRegistryEntryRequest.parse_obj({
            "name": obj.get("name"),
            "worker_type": obj.get("workerType"),
            "labels": obj.get("labels"),
            "creator": obj.get("creator"),
            "docker_version": obj.get("dockerVersion")
        })
        return _obj
| 3,735 | 37.916667 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/create_entity_response.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from pydantic import Extra, BaseModel, Field, constr, validator
class CreateEntityResponse(BaseModel):
    """Response returned after creating an entity; carries the new id.

    Generated from the Lightly OpenAPI spec.
    """
    id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
    # Keys accepted by from_dict; anything else raises a ValueError.
    __properties = ["id"]

    @validator('id')
    def id_validate_regular_expression(cls, value):
        """Ensure the id is a 24-character lowercase hex string."""
        if re.match(r"^[a-f0-9]{24}$", value):
            return value
        raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Return a pretty-printed string of the model's fields."""
        as_dict = self.dict(by_alias=by_alias)
        return pprint.pformat(as_dict)

    def to_json(self, by_alias: bool = False) -> str:
        """Serialize the model to a JSON string."""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> CreateEntityResponse:
        """Create an instance of CreateEntityResponse from a JSON string"""
        parsed = json.loads(json_str)
        return cls.from_dict(parsed)

    def to_dict(self, by_alias: bool = False):
        """Return the dict representation, omitting None-valued entries."""
        return self.dict(by_alias=by_alias, exclude={}, exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> CreateEntityResponse:
        """Create an instance of CreateEntityResponse from a dict"""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return CreateEntityResponse.parse_obj(obj)
        # Unknown keys are an error rather than being silently dropped.
        if any(key not in cls.__properties for key in obj):
            raise ValueError("Error due to additional fields (not defined in CreateEntityResponse) in the input: " + str(obj))
        return CreateEntityResponse.parse_obj({
            "id": obj.get("id")
        })
| 2,773 | 31.255814 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/create_sample_with_write_urls_response.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from pydantic import Extra, BaseModel, Field, constr, validator
from lightly.openapi_generated.swagger_client.models.sample_write_urls import SampleWriteUrls
class CreateSampleWithWriteUrlsResponse(BaseModel):
    """Response carrying the new sample id plus its write URLs.

    Generated from the Lightly OpenAPI spec; do not edit manually.
    """
    id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
    sample_write_urls: SampleWriteUrls = Field(..., alias="sampleWriteUrls")
    # Aliased key names accepted by from_dict; anything else raises.
    __properties = ["id", "sampleWriteUrls"]

    @validator('id')
    def id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # MongoDB ObjectIds are exactly 24 lowercase hex characters.
        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    class Config:
        """Pydantic configuration"""
        # Accept both python field names and their camelCase aliases on input.
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # Reject unknown fields instead of silently ignoring them.
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> CreateSampleWithWriteUrlsResponse:
        """Create an instance of CreateSampleWithWriteUrlsResponse from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        # override the default output from pydantic by calling `to_dict()` of sample_write_urls
        # so the nested model serializes through its own (generated) to_dict.
        if self.sample_write_urls:
            _dict['sampleWriteUrls' if by_alias else 'sample_write_urls'] = self.sample_write_urls.to_dict(by_alias=by_alias)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> CreateSampleWithWriteUrlsResponse:
        """Create an instance of CreateSampleWithWriteUrlsResponse from a dict"""
        # NOTE(review): returns None for None input despite the non-Optional annotation.
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return CreateSampleWithWriteUrlsResponse.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in CreateSampleWithWriteUrlsResponse) in the input: " + str(obj))
        _obj = CreateSampleWithWriteUrlsResponse.parse_obj({
            "id": obj.get("id"),
            "sample_write_urls": SampleWriteUrls.from_dict(obj.get("sampleWriteUrls")) if obj.get("sampleWriteUrls") is not None else None
        })
        return _obj
| 3,477 | 36.804348 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/create_team_membership_request.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from pydantic import Extra, BaseModel, Field, StrictStr
from lightly.openapi_generated.swagger_client.models.team_role import TeamRole
class CreateTeamMembershipRequest(BaseModel):
    """Payload for adding a user (identified by email) to a team with a role.

    Generated from the Lightly OpenAPI spec.
    """
    email: StrictStr = Field(..., description="email of the user")
    role: TeamRole = Field(...)
    # Keys accepted by from_dict; anything else raises a ValueError.
    __properties = ["email", "role"]

    class Config:
        """Pydantic configuration"""
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Return a pretty-printed string of the model's fields."""
        as_dict = self.dict(by_alias=by_alias)
        return pprint.pformat(as_dict)

    def to_json(self, by_alias: bool = False) -> str:
        """Serialize the model to a JSON string."""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> CreateTeamMembershipRequest:
        """Create an instance of CreateTeamMembershipRequest from a JSON string"""
        parsed = json.loads(json_str)
        return cls.from_dict(parsed)

    def to_dict(self, by_alias: bool = False):
        """Return the dict representation, omitting None-valued entries."""
        return self.dict(by_alias=by_alias, exclude={}, exclude_none=True)

    @classmethod
    def from_dict(cls, obj: dict) -> CreateTeamMembershipRequest:
        """Create an instance of CreateTeamMembershipRequest from a dict"""
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return CreateTeamMembershipRequest.parse_obj(obj)
        # Unknown keys are an error rather than being silently dropped.
        if any(key not in cls.__properties for key in obj):
            raise ValueError("Error due to additional fields (not defined in CreateTeamMembershipRequest) in the input: " + str(obj))
        return CreateTeamMembershipRequest.parse_obj({
            "email": obj.get("email"),
            "role": obj.get("role")
        })
| 2,707 | 32.02439 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/creator.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import json
import pprint
import re # noqa: F401
from enum import Enum
from aenum import no_arg # type: ignore
class Creator(str, Enum):
    """Enumeration of the possible creators of an API resource.

    Subclassing ``str`` makes every member compare equal to its raw
    string value, which keeps JSON round-trips trivial.
    """

    UNKNOWN = 'UNKNOWN'
    USER_WEBAPP = 'USER_WEBAPP'
    USER_PIP = 'USER_PIP'
    USER_PIP_LIGHTLY_MAGIC = 'USER_PIP_LIGHTLY_MAGIC'
    USER_WORKER = 'USER_WORKER'

    @classmethod
    def from_json(cls, json_str: str) -> 'Creator':
        """Parse a JSON string literal (e.g. '"USER_PIP"') into a member."""
        decoded = json.loads(json_str)
        return Creator(decoded)
| 1,011 | 21 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/crop_data.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Union
from pydantic import Extra, BaseModel, Field, confloat, conint, constr, validator
class CropData(BaseModel):
    """Metadata locating one crop among a parent sample's prediction singletons.

    Generated from the Lightly OpenAPI spec; do not edit manually.
    """
    parent_id: constr(strict=True) = Field(..., alias="parentId", description="MongoDB ObjectId")
    prediction_uuid_timestamp: conint(strict=True, ge=0) = Field(..., alias="predictionUUIDTimestamp", description="unix timestamp in milliseconds")
    prediction_index: conint(strict=True, ge=0) = Field(..., alias="predictionIndex", description="the index of this crop within all found prediction singletons of a sampleId (the parentId)")
    prediction_task_name: constr(strict=True, min_length=1) = Field(..., alias="predictionTaskName", description="A name which is safe to have as a file/folder name in a file system")
    prediction_task_category_id: conint(strict=True, ge=0) = Field(..., alias="predictionTaskCategoryId", description="The id of the category. Needs to be a positive integer but can be any integer (gaps are allowed, does not need to be sequential)")
    prediction_task_score: Union[confloat(le=1, ge=0, strict=True), conint(le=1, ge=0, strict=True)] = Field(..., alias="predictionTaskScore", description="the score for the prediction task which yielded this crop")
    # Aliased key names accepted by from_dict; anything else raises.
    __properties = ["parentId", "predictionUUIDTimestamp", "predictionIndex", "predictionTaskName", "predictionTaskCategoryId", "predictionTaskScore"]

    @validator('parent_id')
    def parent_id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # MongoDB ObjectIds are exactly 24 lowercase hex characters.
        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    @validator('prediction_task_name')
    def prediction_task_name_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # Task name must start alphanumeric; later chars may be space, '.', '_' or '-'.
        if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$", value):
            raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 ._-]+$/")
        return value

    class Config:
        """Pydantic configuration"""
        # Accept both python field names and their camelCase aliases on input.
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # Reject unknown fields instead of silently ignoring them.
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> CropData:
        """Create an instance of CropData from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        # exclude_none drops optional fields that were never set.
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> CropData:
        """Create an instance of CropData from a dict"""
        # NOTE(review): returns None for None input despite the non-Optional annotation.
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return CropData.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in CropData) in the input: " + str(obj))
        _obj = CropData.parse_obj({
            "parent_id": obj.get("parentId"),
            "prediction_uuid_timestamp": obj.get("predictionUUIDTimestamp"),
            "prediction_index": obj.get("predictionIndex"),
            "prediction_task_name": obj.get("predictionTaskName"),
            "prediction_task_category_id": obj.get("predictionTaskCategoryId"),
            "prediction_task_score": obj.get("predictionTaskScore")
        })
        return _obj
| 4,574 | 43.417476 | 249 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/dataset_create_request.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic import Extra, BaseModel, Field, constr, validator
from lightly.openapi_generated.swagger_client.models.dataset_creator import DatasetCreator
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.image_type import ImageType
class DatasetCreateRequest(BaseModel):
    """Request payload for creating a dataset.

    Generated from the Lightly OpenAPI spec; do not edit manually.
    """
    name: constr(strict=True, min_length=3) = Field(...)
    type: Optional[DatasetType] = None
    img_type: Optional[ImageType] = Field(None, alias="imgType")
    creator: Optional[DatasetCreator] = None
    parent_dataset_id: Optional[constr(strict=True)] = Field(None, alias="parentDatasetId", description="MongoDB ObjectId")
    # Aliased key names accepted by from_dict; anything else raises.
    __properties = ["name", "type", "imgType", "creator", "parentDatasetId"]

    @validator('name')
    def name_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # Name must start alphanumeric; later chars may also be space, '_' or '-'.
        if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 _-]+$", value):
            raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 _-]+$/")
        return value

    @validator('parent_dataset_id')
    def parent_dataset_id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # Optional field: skip validation when unset.
        if value is None:
            return value
        # MongoDB ObjectIds are exactly 24 lowercase hex characters.
        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    class Config:
        """Pydantic configuration"""
        # Accept both python field names and their camelCase aliases on input.
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # Reject unknown fields instead of silently ignoring them.
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> DatasetCreateRequest:
        """Create an instance of DatasetCreateRequest from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        # exclude_none drops optional fields that were never set.
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> DatasetCreateRequest:
        """Create an instance of DatasetCreateRequest from a dict"""
        # NOTE(review): returns None for None input despite the non-Optional annotation.
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return DatasetCreateRequest.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in DatasetCreateRequest) in the input: " + str(obj))
        _obj = DatasetCreateRequest.parse_obj({
            "name": obj.get("name"),
            "type": obj.get("type"),
            "img_type": obj.get("imgType"),
            "creator": obj.get("creator"),
            "parent_dataset_id": obj.get("parentDatasetId")
        })
        return _obj
| 3,948 | 35.906542 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/dataset_creator.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
import json
import pprint
import re # noqa: F401
from enum import Enum
from aenum import no_arg # type: ignore
class DatasetCreator(str, Enum):
    """Enumeration of the clients that can create a dataset.

    Subclassing ``str`` makes every member compare equal to its raw
    string value, which keeps JSON round-trips trivial.
    """

    UNKNOWN = 'UNKNOWN'
    USER_WEBAPP = 'USER_WEBAPP'
    USER_PIP = 'USER_PIP'
    USER_PIP_LIGHTLY_MAGIC = 'USER_PIP_LIGHTLY_MAGIC'
    USER_WORKER = 'USER_WORKER'

    @classmethod
    def from_json(cls, json_str: str) -> 'DatasetCreator':
        """Parse a JSON string literal (e.g. '"USER_PIP"') into a member."""
        decoded = json.loads(json_str)
        return DatasetCreator(decoded)
| 1,046 | 21.76087 | 220 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/dataset_data.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Optional
from pydantic import Extra, BaseModel, Field, StrictInt, StrictStr, conint, conlist, constr, validator
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.image_type import ImageType
from lightly.openapi_generated.swagger_client.models.shared_access_type import SharedAccessType
class DatasetData(BaseModel):
    """Full server-side representation of a dataset.

    Generated from the Lightly OpenAPI spec; do not edit manually.
    """
    id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
    name: constr(strict=True, min_length=3) = Field(...)
    user_id: StrictStr = Field(..., alias="userId", description="The owner of the dataset")
    access_type: Optional[SharedAccessType] = Field(None, alias="accessType")
    type: DatasetType = Field(...)
    img_type: Optional[ImageType] = Field(None, alias="imgType")
    n_samples: StrictInt = Field(..., alias="nSamples")
    size_in_bytes: StrictInt = Field(..., alias="sizeInBytes")
    meta_data_configuration_id: Optional[constr(strict=True)] = Field(None, alias="metaDataConfigurationId", description="MongoDB ObjectId")
    datasources: Optional[conlist(constr(strict=True))] = None
    created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds")
    last_modified_at: conint(strict=True, ge=0) = Field(..., alias="lastModifiedAt", description="unix timestamp in milliseconds")
    datasource_processed_until_timestamp: Optional[conint(strict=True, ge=0)] = Field(None, alias="datasourceProcessedUntilTimestamp", description="unix timestamp in seconds")
    access_role: Optional[constr(strict=True)] = Field(None, alias="accessRole", description="AccessRole bitmask of the one accessing the dataset")
    parent_dataset_id: Optional[constr(strict=True)] = Field(None, alias="parentDatasetId", description="MongoDB ObjectId")
    original_dataset_id: Optional[constr(strict=True)] = Field(None, alias="originalDatasetId", description="MongoDB ObjectId")
    # Aliased key names accepted by from_dict; anything else raises.
    __properties = ["id", "name", "userId", "accessType", "type", "imgType", "nSamples", "sizeInBytes", "metaDataConfigurationId", "datasources", "createdAt", "lastModifiedAt", "datasourceProcessedUntilTimestamp", "accessRole", "parentDatasetId", "originalDatasetId"]

    @validator('id')
    def id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # MongoDB ObjectIds are exactly 24 lowercase hex characters.
        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    @validator('name')
    def name_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # Name must start alphanumeric; later chars may also be space, '_' or '-'.
        if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 _-]+$", value):
            raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 _-]+$/")
        return value

    @validator('meta_data_configuration_id')
    def meta_data_configuration_id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # Optional field: skip validation when unset.
        if value is None:
            return value
        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    @validator('access_role')
    def access_role_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        if value is None:
            return value
        # Bitmask is serialized as a python-style binary literal, e.g. "0b101001".
        if not re.match(r"^0b[01]{6}$", value):
            raise ValueError(r"must validate the regular expression /^0b[01]{6}$/")
        return value

    @validator('parent_dataset_id')
    def parent_dataset_id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        if value is None:
            return value
        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    @validator('original_dataset_id')
    def original_dataset_id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        if value is None:
            return value
        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    class Config:
        """Pydantic configuration"""
        # Accept both python field names and their camelCase aliases on input.
        allow_population_by_field_name = True
        validate_assignment = True
        use_enum_values = True
        # Reject unknown fields instead of silently ignoring them.
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> DatasetData:
        """Create an instance of DatasetData from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        # exclude_none drops optional fields that were never set.
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> DatasetData:
        """Create an instance of DatasetData from a dict"""
        # NOTE(review): returns None for None input despite the non-Optional annotation.
        if obj is None:
            return None
        if not isinstance(obj, dict):
            return DatasetData.parse_obj(obj)
        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in DatasetData) in the input: " + str(obj))
        _obj = DatasetData.parse_obj({
            "id": obj.get("id"),
            "name": obj.get("name"),
            "user_id": obj.get("userId"),
            "access_type": obj.get("accessType"),
            "type": obj.get("type"),
            "img_type": obj.get("imgType"),
            "n_samples": obj.get("nSamples"),
            "size_in_bytes": obj.get("sizeInBytes"),
            "meta_data_configuration_id": obj.get("metaDataConfigurationId"),
            "datasources": obj.get("datasources"),
            "created_at": obj.get("createdAt"),
            "last_modified_at": obj.get("lastModifiedAt"),
            "datasource_processed_until_timestamp": obj.get("datasourceProcessedUntilTimestamp"),
            "access_role": obj.get("accessRole"),
            "parent_dataset_id": obj.get("parentDatasetId"),
            "original_dataset_id": obj.get("originalDatasetId")
        })
        return _obj
| 7,328 | 43.150602 | 267 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/dataset_data_enriched.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Optional
from pydantic import Extra, BaseModel, Field, StrictInt, StrictStr, conint, conlist, constr, validator
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.image_type import ImageType
from lightly.openapi_generated.swagger_client.models.shared_access_type import SharedAccessType
class DatasetDataEnriched(BaseModel):
    """
    DatasetDataEnriched

    Auto-generated pydantic (v1) model for the Lightly API ``DatasetDataEnriched``
    schema. Each ``Field(alias=...)`` maps the snake_case Python attribute to the
    camelCase key used on the wire; ``strict=True`` constraints reject implicit
    type coercion. Do not edit manually — regenerate via OpenAPI Generator.
    """
    # 24-char lowercase-hex MongoDB ObjectId; format enforced by the
    # `id_validate_regular_expression` validator below.
    id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
    # Human-readable dataset name; pattern enforced by validator (min 3 chars,
    # must start alphanumeric).
    name: constr(strict=True, min_length=3) = Field(...)
    user_id: StrictStr = Field(..., alias="userId", description="The owner of the dataset")
    access_type: Optional[SharedAccessType] = Field(None, alias="accessType")
    type: DatasetType = Field(...)
    img_type: Optional[ImageType] = Field(None, alias="imgType")
    n_samples: StrictInt = Field(..., alias="nSamples")
    size_in_bytes: StrictInt = Field(..., alias="sizeInBytes")
    # Timestamps are non-negative unix epochs in milliseconds (ge=0).
    created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds")
    last_modified_at: conint(strict=True, ge=0) = Field(..., alias="lastModifiedAt", description="unix timestamp in milliseconds")
    meta_data_configuration_id: Optional[constr(strict=True)] = Field(None, alias="metaDataConfigurationId", description="MongoDB ObjectId")
    # Bitmask string of the form "0b" + six 0/1 digits; format enforced by
    # the `access_role_validate_regular_expression` validator below.
    access_role: Optional[constr(strict=True)] = Field(None, alias="accessRole", description="AccessRole bitmask of the one accessing the dataset")
    datasources: Optional[conlist(constr(strict=True))] = None
    parent_dataset_id: Optional[constr(strict=True)] = Field(None, alias="parentDatasetId", description="MongoDB ObjectId")
    original_dataset_id: Optional[constr(strict=True)] = Field(None, alias="originalDatasetId", description="MongoDB ObjectId")
    samples: conlist(constr(strict=True)) = Field(...)
    n_tags: StrictInt = Field(..., alias="nTags")
    n_embeddings: StrictInt = Field(..., alias="nEmbeddings")
    # Canonical wire-format (camelCase) key names; used by `from_dict` to
    # reject unknown input keys. Name-mangled to _DatasetDataEnriched__properties.
    __properties = ["id", "name", "userId", "accessType", "type", "imgType", "nSamples", "sizeInBytes", "createdAt", "lastModifiedAt", "metaDataConfigurationId", "accessRole", "datasources", "parentDatasetId", "originalDatasetId", "samples", "nTags", "nEmbeddings"]

    @validator('id')
    def id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    @validator('name')
    def name_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 _-]+$", value):
            raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 _-]+$/")
        return value

    @validator('meta_data_configuration_id')
    def meta_data_configuration_id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # Optional field: None bypasses the pattern check.
        if value is None:
            return value

        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    @validator('access_role')
    def access_role_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # Optional field: None bypasses the pattern check.
        if value is None:
            return value

        if not re.match(r"^0b[01]{6}$", value):
            raise ValueError(r"must validate the regular expression /^0b[01]{6}$/")
        return value

    @validator('parent_dataset_id')
    def parent_dataset_id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # Optional field: None bypasses the pattern check.
        if value is None:
            return value

        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    @validator('original_dataset_id')
    def original_dataset_id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        # Optional field: None bypasses the pattern check.
        if value is None:
            return value

        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    class Config:
        """Pydantic configuration"""
        # Accept both field names and their camelCase aliases on input.
        allow_population_by_field_name = True
        # Re-run validators on attribute assignment, not only at construction.
        validate_assignment = True
        # Serialize enum members by value (e.g. DatasetType/ImageType).
        use_enum_values = True
        # Reject unexpected keys instead of silently ignoring them.
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> DatasetDataEnriched:
        """Create an instance of DatasetDataEnriched from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        # exclude_none drops unset optional fields from the output dict.
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> DatasetDataEnriched:
        """Create an instance of DatasetDataEnriched from a dict"""
        # None propagates unchanged (note: despite the return annotation).
        if obj is None:
            return None

        # Non-dict input is handed to pydantic's own parsing as-is.
        if not isinstance(obj, dict):
            return DatasetDataEnriched.parse_obj(obj)

        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in DatasetDataEnriched) in the input: " + str(obj))

        # Map camelCase wire keys to snake_case model fields explicitly.
        _obj = DatasetDataEnriched.parse_obj({
            "id": obj.get("id"),
            "name": obj.get("name"),
            "user_id": obj.get("userId"),
            "access_type": obj.get("accessType"),
            "type": obj.get("type"),
            "img_type": obj.get("imgType"),
            "n_samples": obj.get("nSamples"),
            "size_in_bytes": obj.get("sizeInBytes"),
            "created_at": obj.get("createdAt"),
            "last_modified_at": obj.get("lastModifiedAt"),
            "meta_data_configuration_id": obj.get("metaDataConfigurationId"),
            "access_role": obj.get("accessRole"),
            "datasources": obj.get("datasources"),
            "parent_dataset_id": obj.get("parentDatasetId"),
            "original_dataset_id": obj.get("originalDatasetId"),
            "samples": obj.get("samples"),
            "n_tags": obj.get("nTags"),
            "n_embeddings": obj.get("nEmbeddings")
        })
        return _obj
| 7,426 | 42.688235 | 265 | py |
lightly | lightly-master/lightly/openapi_generated/swagger_client/models/dataset_embedding_data.py | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@lightly.ai
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic import Extra, BaseModel, Field, StrictBool, StrictStr, conint, constr, validator
class DatasetEmbeddingData(BaseModel):
    """
    DatasetEmbeddingData

    Auto-generated pydantic (v1) model for the Lightly API ``DatasetEmbeddingData``
    schema. ``Field(alias=...)`` maps snake_case attributes to the camelCase wire
    keys. Do not edit manually — regenerate via OpenAPI Generator.
    """
    # 24-char lowercase-hex MongoDB ObjectId; format enforced by the
    # `id_validate_regular_expression` validator below.
    id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
    name: StrictStr = Field(..., description="name of the embedding chosen by the user calling writeCSVUrl")
    is_processed: StrictBool = Field(..., alias="isProcessed", description="indicator whether embeddings have already been processed by a background worker")
    # Non-negative unix epoch in milliseconds (ge=0).
    created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds")
    is2d: Optional[StrictBool] = Field(None, description="flag set by the background worker if the embedding is 2d")
    # Canonical wire-format (camelCase) key names; used by `from_dict` to
    # reject unknown input keys. Name-mangled to _DatasetEmbeddingData__properties.
    __properties = ["id", "name", "isProcessed", "createdAt", "is2d"]

    @validator('id')
    def id_validate_regular_expression(cls, value):
        """Validates the regular expression"""
        if not re.match(r"^[a-f0-9]{24}$", value):
            raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
        return value

    class Config:
        """Pydantic configuration"""
        # Accept both field names and their camelCase aliases on input.
        allow_population_by_field_name = True
        # Re-run validators on attribute assignment, not only at construction.
        validate_assignment = True
        # Serialize enum members by value.
        use_enum_values = True
        # Reject unexpected keys instead of silently ignoring them.
        extra = Extra.forbid

    def to_str(self, by_alias: bool = False) -> str:
        """Returns the string representation of the model"""
        return pprint.pformat(self.dict(by_alias=by_alias))

    def to_json(self, by_alias: bool = False) -> str:
        """Returns the JSON representation of the model"""
        return json.dumps(self.to_dict(by_alias=by_alias))

    @classmethod
    def from_json(cls, json_str: str) -> DatasetEmbeddingData:
        """Create an instance of DatasetEmbeddingData from a JSON string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self, by_alias: bool = False):
        """Returns the dictionary representation of the model"""
        # exclude_none drops unset optional fields from the output dict.
        _dict = self.dict(by_alias=by_alias,
                          exclude={
                          },
                          exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> DatasetEmbeddingData:
        """Create an instance of DatasetEmbeddingData from a dict"""
        # None propagates unchanged (note: despite the return annotation).
        if obj is None:
            return None

        # Non-dict input is handed to pydantic's own parsing as-is.
        if not isinstance(obj, dict):
            return DatasetEmbeddingData.parse_obj(obj)

        # raise errors for additional fields in the input
        for _key in obj.keys():
            if _key not in cls.__properties:
                raise ValueError("Error due to additional fields (not defined in DatasetEmbeddingData) in the input: " + str(obj))

        # Map camelCase wire keys to snake_case model fields explicitly.
        _obj = DatasetEmbeddingData.parse_obj({
            "id": obj.get("id"),
            "name": obj.get("name"),
            "is_processed": obj.get("isProcessed"),
            "created_at": obj.get("createdAt"),
            "is2d": obj.get("is2d")
        })
        return _obj
| 3,553 | 36.808511 | 220 | py |