id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
6,428 | from importlib import import_module
from dzoedepth.models.depth_model import DepthModel
class DepthModel(nn.Module):
    """Abstract base class for metric depth estimation models.

    Provides the shared inference pipeline: padding augmentation
    (``_infer_with_pad_aug``), horizontal-flip averaging
    (``infer_with_flip_aug``) and a PIL convenience wrapper (``infer_pil``).
    Subclasses implement ``forward`` and must return a dict containing the
    key ``'metric_depth'``.
    """
    def __init__(self):
        super().__init__()
        # Tracked manually so that tensors created at inference time
        # (e.g. in infer_pil) can be placed on the same device as the model.
        self.device = 'cpu'
    def to(self, device) -> nn.Module:
        # Keep self.device in sync with the module's actual device.
        self.device = device
        return super().to(device)
    def forward(self, x, *args, **kwargs):
        # Subclass responsibility; expected to return a dict with 'metric_depth'.
        raise NotImplementedError
    def _infer(self, x: torch.Tensor):
        """
        Inference interface for the model
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        return self(x)['metric_depth']
    def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode="reflect", **kwargs) -> torch.Tensor:
        """
        Inference interface for the model with padding augmentation
        Padding augmentation fixes the boundary artifacts in the output depth map.
        Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image.
        This augmentation pads the input image and crops the prediction back to the original size / view.
        Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
            pad_input (bool, optional): whether to pad the input or not. Defaults to True.
            fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.
            fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.
            upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.
            padding_mode (str, optional): padding mode. Defaults to "reflect".
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        # assert x is nchw and c = 3
        assert x.dim() == 4, "x must be 4 dimensional, got {}".format(x.dim())
        assert x.shape[1] == 3, "x must have 3 channels, got {}".format(x.shape[1])
        if pad_input:
            assert fh > 0 or fw > 0, "atlease one of fh and fw must be greater than 0"
            # Pad amounts grow with sqrt of the image size (heuristic factors fh/fw).
            pad_h = int(np.sqrt(x.shape[2]/2) * fh)
            pad_w = int(np.sqrt(x.shape[3]/2) * fw)
            # F.pad expects pairs starting from the last dim: (left, right[, top, bottom]).
            padding = [pad_w, pad_w]
            if pad_h > 0:
                padding += [pad_h, pad_h]
            x = F.pad(x, padding, mode=padding_mode, **kwargs)
        out = self._infer(x)
        if out.shape[-2:] != x.shape[-2:]:
            # Model output may be at a lower resolution than the (padded) input.
            out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)
        if pad_input:
            # crop to the original size, handling the case where pad_h and pad_w is 0
            if pad_h > 0:
                out = out[:, :, pad_h:-pad_h,:]
            if pad_w > 0:
                out = out[:, :, :, pad_w:-pad_w]
        return out
    def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor:
        """
        Inference interface for the model with horizontal flip augmentation
        Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip.
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        # infer with horizontal flip and average
        out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)
        out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs)
        # Flip the flipped prediction back before averaging.
        out = (out + torch.flip(out_flip, dims=[3])) / 2
        return out
    def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor:
        """
        Inference interface for the model
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
            with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        if with_flip_aug:
            return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs)
        else:
            return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)
    def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str="numpy", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]:
        """
        Inference interface for the model for PIL image
        Args:
            pil_img (PIL.Image.Image): input PIL image
            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
            with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
            output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to "numpy".
        """
        x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device)
        out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs)
        if output_type == "numpy":
            return out_tensor.squeeze().cpu().numpy()
        elif output_type == "pil":
            # uint16 is required for depth pil image
            # Scale by 256 so fractional metres survive the integer cast
            # (standard 16-bit depth PNG convention).
            out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16)
            return Image.fromarray(out_16bit_numpy)
        elif output_type == "tensor":
            return out_tensor.squeeze().cpu()
        else:
            raise ValueError(f"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'")
The provided code snippet includes necessary dependencies for implementing the `build_model` function. Write a Python function `def build_model(config) -> DepthModel` to solve the following problem:
Builds a model from a config. The model is specified by the model name and version in the config. The model is then constructed using the build_from_config function of the model interface. This function should be used to construct models for training and evaluation. Args: config (dict): Config dict. Config is constructed in utils/config.py. Each model has its own config file(s) saved in its root model folder. Returns: torch.nn.Module: Model corresponding to name and version as specified in config
Here is the function:
def build_model(config) -> "DepthModel":
    """Builds a model from a config. The model is specified by the model name and version in the config. The model is then constructed using the build_from_config function of the model interface.
    This function should be used to construct models for training and evaluation.

    Args:
        config (dict): Config dict. Config is constructed in utils/config.py. Each model has its own config file(s) saved in its root model folder.

    Returns:
        torch.nn.Module: Model corresponding to name and version as specified in config

    Raises:
        ValueError: if the model module cannot be imported or exposes no
            ``get_version`` function.
    """
    # Model packages live under dzoedepth.models.<name>.
    module_name = f"dzoedepth.models.{config.model}"
    try:
        module = import_module(module_name)
    except ModuleNotFoundError as e:
        # print the original error message
        print(e)
        raise ValueError(
            f"Model {config.model} not found. Refer above error for details.") from e
    try:
        # Direct attribute access instead of getattr with a string literal;
        # raises the same AttributeError when missing.
        get_version = module.get_version
    except AttributeError as e:
        raise ValueError(
            f"Model {config.model} has no get_version function.") from e
    # get_version returns the versioned model interface; build_from_config
    # constructs the actual nn.Module from the config.
    return get_version(config.version_name).build_from_config(config)
6,429 | import json
import os
from dzoedepth.utils.easydict import EasyDict as edict
from dzoedepth.utils.arg_utils import infer_type
import pathlib
import platform
# Defaults shared by every experiment: output paths and logging/run metadata.
COMMON_CONFIG = {
    "save_dir": os.path.expanduser("~/shortcuts/monodepth3_checkpoints"),  # checkpoint root
    "project": "ZoeDepth",  # experiment-tracker project name
    "tags": '',
    "notes": "",
    "gpu": None,  # None selects the DataParallel path in parallelize()
    "root": ".",
    "uid": None,
    "print_losses": False
}
# Per-dataset paths and evaluation settings, keyed by the dataset identifier
# passed to get_config(..., dataset=...). The *_crop flags select the
# evaluation crop; min/max_depth* bound the valid depth range.
# NOTE(review): HOME_DIR is not defined in this snippet -- presumably the
# user's home directory; verify where it is declared.
DATASETS_CONFIG = {
    "kitti": {
        "dataset": "kitti",
        "min_depth": 0.001,
        "max_depth": 80,
        "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
        "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
        "filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt",
        "input_height": 352,
        "input_width": 1216,  # 704
        "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
        "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
        "filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt",
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "do_random_rotate": True,
        "degree": 1.0,
        "do_kb_crop": True,
        "garg_crop": True,
        "eigen_crop": False,
        "use_right": False
    },
    # Same as "kitti" but with rotation augmentation disabled (test split).
    "kitti_test": {
        "dataset": "kitti",
        "min_depth": 0.001,
        "max_depth": 80,
        "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
        "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
        "filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt",
        "input_height": 352,
        "input_width": 1216,
        "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
        "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
        "filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt",
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "do_random_rotate": False,
        "degree": 1.0,
        "do_kb_crop": True,
        "garg_crop": True,
        "eigen_crop": False,
        "use_right": False
    },
    "nyu": {
        "dataset": "nyu",
        "avoid_boundary": False,
        "min_depth": 1e-3,   # originally 0.1
        "max_depth": 10,
        "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"),
        "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"),
        "filenames_file": "./train_test_inputs/nyudepthv2_train_files_with_gt.txt",
        "input_height": 480,
        "input_width": 640,
        "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"),
        "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"),
        "filenames_file_eval": "./train_test_inputs/nyudepthv2_test_files_with_gt.txt",
        "min_depth_eval": 1e-3,
        "max_depth_eval": 10,
        "min_depth_diff": -10,
        "max_depth_diff": 10,
        "do_random_rotate": True,
        "degree": 1.0,
        "do_kb_crop": False,
        "garg_crop": False,
        "eigen_crop": True
    },
    # The remaining entries are zero-shot evaluation-only datasets.
    "ibims": {
        "dataset": "ibims",
        "ibims_root": os.path.join(HOME_DIR, "shortcuts/datasets/ibims/ibims1_core_raw/"),
        "eigen_crop": True,
        "garg_crop": False,
        "do_kb_crop": False,
        "min_depth_eval": 0,
        "max_depth_eval": 10,
        "min_depth": 1e-3,
        "max_depth": 10
    },
    "sunrgbd": {
        "dataset": "sunrgbd",
        "sunrgbd_root": os.path.join(HOME_DIR, "shortcuts/datasets/SUNRGBD/test/"),
        "eigen_crop": True,
        "garg_crop": False,
        "do_kb_crop": False,
        "min_depth_eval": 0,
        "max_depth_eval": 8,
        "min_depth": 1e-3,
        "max_depth": 10
    },
    "diml_indoor": {
        "dataset": "diml_indoor",
        "diml_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_indoor_test/"),
        "eigen_crop": True,
        "garg_crop": False,
        "do_kb_crop": False,
        "min_depth_eval": 0,
        "max_depth_eval": 10,
        "min_depth": 1e-3,
        "max_depth": 10
    },
    "diml_outdoor": {
        "dataset": "diml_outdoor",
        "diml_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_outdoor_test/"),
        "eigen_crop": False,
        "garg_crop": True,
        "do_kb_crop": False,
        "min_depth_eval": 2,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 80
    },
    "diode_indoor": {
        "dataset": "diode_indoor",
        "diode_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_indoor/"),
        "eigen_crop": True,
        "garg_crop": False,
        "do_kb_crop": False,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 10,
        "min_depth": 1e-3,
        "max_depth": 10
    },
    "diode_outdoor": {
        "dataset": "diode_outdoor",
        "diode_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_outdoor/"),
        "eigen_crop": False,
        "garg_crop": True,
        "do_kb_crop": False,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 80
    },
    "hypersim_test": {
        "dataset": "hypersim_test",
        "hypersim_test_root": os.path.join(HOME_DIR, "shortcuts/datasets/hypersim_test/"),
        "eigen_crop": True,
        "garg_crop": False,
        "do_kb_crop": False,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 10
    },
    "vkitti": {
        "dataset": "vkitti",
        "vkitti_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti_test/"),
        "eigen_crop": False,
        "garg_crop": True,
        "do_kb_crop": True,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 80
    },
    "vkitti2": {
        "dataset": "vkitti2",
        "vkitti2_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti2/"),
        "eigen_crop": False,
        "garg_crop": True,
        "do_kb_crop": True,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 80,
    },
    "ddad": {
        "dataset": "ddad",
        "ddad_root": os.path.join(HOME_DIR, "shortcuts/datasets/ddad/ddad_val/"),
        "eigen_crop": False,
        "garg_crop": True,
        "do_kb_crop": True,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 80,
    },
}
# Training defaults shared by all models; merged with COMMON_CONFIG in get_config.
COMMON_TRAINING_CONFIG = {
    "dataset": "nyu",
    "distributed": True,
    "workers": 16,
    "clip_grad": 0.1,
    "use_shared_dict": False,
    "shared_dict": None,
    "use_amp": False,  # mixed precision off by default
    "aug": True,
    "random_crop": False,
    "random_translate": False,
    "translate_prob": 0.2,
    "max_translation": 100,
    "validate_every": 0.25,  # fraction of an epoch between validations
    "log_images_every": 0.1,
    "prefetch": False,
}
def flatten(config, except_keys=('bin_conf',)):
    """Flatten a nested dict into a single-level dict of leaf key/value pairs.

    Keys listed in ``except_keys`` are kept with their (possibly nested)
    value instead of only being recursed into.

    Args:
        config (dict): possibly nested configuration dict.
        except_keys (tuple, optional): keys whose values are preserved as-is.
            Defaults to ('bin_conf',).

    Returns:
        dict: flattened dict. On duplicate leaf keys, the last one wins.
    """
    # BUGFIX: except_keys was ('bin_conf') -- a plain *string*, not a tuple --
    # so `key in except_keys` performed substring matching and wrongly treated
    # keys such as 'bin' or 'conf' as exceptions.
    def recurse(inp):
        if isinstance(inp, dict):
            for key, value in inp.items():
                if key in except_keys:
                    yield (key, value)
                if isinstance(value, dict):
                    yield from recurse(value)
                else:
                    yield (key, value)
    return dict(list(recurse(config)))
def split_combined_args(kwargs):
    """Expand arguments combined with '__' into individual key/value pairs.

    A combined argument packs several keys separated by '__' and matching
    values separated by ';', e.g. '__n_bins__lr=256;0.001' yields
    n_bins='256' and lr='0.001'. The combined entry itself is retained.

    Args:
        kwargs (dict): argument mapping, possibly containing combined entries.

    Returns:
        dict: copy of ``kwargs`` with each combined entry expanded.
    """
    new_kwargs = dict(kwargs)
    for key, value in kwargs.items():
        # Guard clause: only entries whose key starts with '__' are combined.
        if not key.startswith("__"):
            continue
        keys = key.split("__")[1:]
        values = value.split(";")
        assert len(keys) == len(
            values), f"Combined arguments should have equal number of keys and values. Keys are separated by '__' and Values are separated with ';'. For example, '__n_bins__lr=256;0.001. Given (keys,values) is ({keys}, {values})"
        new_kwargs.update(zip(keys, values))
    return new_kwargs
def parse_list(config, key, dtype=int):
    """Ensure config[key] is a list of ``dtype`` values, parsing in place.

    A comma-separated string value is converted to a list of ``dtype``;
    an existing list is validated. Missing keys are ignored.
    """
    if key not in config:
        return
    raw = config[key]
    if isinstance(raw, str):
        # Comma-separated string -> typed list.
        config[key] = [dtype(token) for token in raw.split(',')]
    assert isinstance(config[key], list) and all([isinstance(e, dtype) for e in config[key]]
                                                 ), f"{key} should be a list of values dtype {dtype}. Given {config[key]} of type {type(config[key])} with values of type {[type(e) for e in config[key]]}."
def update_model_config(config, mode, model_name, model_version=None, strict=False):
    """Overlay the model-specific config (flattened) onto ``config``.

    Loads the config for ``model_name``/``model_version`` and merges its
    ``model`` section together with the section for ``mode`` on top of the
    given config. When no model config is found, ``config`` is returned
    unchanged unless ``strict`` is set, in which case a ValueError is raised.
    """
    model_config = get_model_config(model_name, model_version)
    if model_config is None:
        if strict:
            raise ValueError(f"Config file for model {model_name} not found.")
        return config
    overlay = flatten({**model_config.model, **model_config[mode]})
    return {**config, **overlay}
def check_choices(name, value, choices):
    """Raise ValueError unless ``value`` is one of ``choices``."""
    # return # No checks in dev branch
    if value in choices:
        return
    raise ValueError(f"{name} {value} not in supported choices {choices}")
# Config keys coerced to bool in get_config (values may arrive as 0/1 or strings).
KEYS_TYPE_BOOL = ["use_amp", "distributed", "use_shared_dict", "same_lr", "aug", "three_phase",
                  "prefetch", "cycle_momentum"]
def infer_type(x):  # hacky way to infer type from string args
    """Best-effort conversion of a string to int, then float.

    Non-string inputs and unparseable strings are returned unchanged.
    """
    if not isinstance(x, str):
        return x
    # Try the narrower type first so "3" stays an int.
    for cast in (int, float):
        try:
            return cast(x)
        except ValueError:
            pass
    return x
The provided code snippet includes necessary dependencies for implementing the `get_config` function. Write a Python function `def get_config(model_name, mode='train', dataset=None, **overwrite_kwargs)` to solve the following problem:
Main entry point to get the config for the model. Args: model_name (str): name of the desired model. mode (str, optional): "train" or "infer". Defaults to 'train'. dataset (str, optional): If specified, the corresponding dataset configuration is loaded as well. Defaults to None. Keyword Args: key-value pairs of arguments to overwrite the default config. The order of precedence for overwriting the config is (Higher precedence first): # 1. overwrite_kwargs # 2. "config_version": Config file version if specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{config_version}.json # 3. "version_name": Default Model version specific config specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{version_name}.json # 4. common_config: Default config for all models specified in COMMON_CONFIG Returns: easydict: The config dictionary for the model.
Here is the function:
def get_config(model_name, mode='train', dataset=None, **overwrite_kwargs):
    """Main entry point to get the config for the model.

    Args:
        model_name (str): name of the desired model.
        mode (str, optional): "train" or "infer". Defaults to 'train'.
        dataset (str, optional): If specified, the corresponding dataset configuration is loaded as well. Defaults to None.

    Keyword Args: key-value pairs of arguments to overwrite the default config.

    The order of precedence for overwriting the config is (Higher precedence first):
        # 1. overwrite_kwargs
        # 2. "config_version": Config file version if specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{config_version}.json
        # 3. "version_name": Default Model version specific config specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{version_name}.json
        # 4. common_config: Default config for all models specified in COMMON_CONFIG

    Returns:
        easydict: The config dictionary for the model.
    """
    check_choices("Model", model_name, ["zoedepth", "zoedepth_nk"])
    check_choices("Mode", mode, ["train", "infer", "eval"])
    if mode == "train":
        check_choices("Dataset", dataset, ["nyu", "kitti", "mix", None])
    config = flatten({**COMMON_CONFIG, **COMMON_TRAINING_CONFIG})
    config = update_model_config(config, mode, model_name)
    # update with model version specific config
    version_name = overwrite_kwargs.get("version_name", config["version_name"])
    config = update_model_config(config, mode, model_name, version_name)
    # update with config version if specified
    config_version = overwrite_kwargs.get("config_version", None)
    if config_version is not None:
        print("Overwriting config with config_version", config_version)
        config = update_model_config(config, mode, model_name, config_version)
    # update with overwrite_kwargs (highest precedence)
    # Combined args are useful for hyperparameter search
    overwrite_kwargs = split_combined_args(overwrite_kwargs)
    config = {**config, **overwrite_kwargs}
    # Casting to bool # TODO: Not necessary. Remove and test
    for key in KEYS_TYPE_BOOL:
        if key in config:
            config[key] = bool(config[key])
    # Model specific post processing of config
    parse_list(config, "n_attractors")
    # adjust n_bins for each bin configuration if bin_conf is given and n_bins is passed in overwrite_kwargs
    if 'bin_conf' in config and 'n_bins' in overwrite_kwargs:
        n_bins = overwrite_kwargs['n_bins']
        # Copy each bin config instead of mutating shared dicts in place.
        config['bin_conf'] = [dict(conf, n_bins=n_bins)
                              for conf in config['bin_conf']]
    if mode == "train":
        orig_dataset = dataset
        if dataset == "mix":
            dataset = 'nyu'  # Use nyu as default for mix. Dataset config is changed accordingly while loading the dataloader
        if dataset is not None:
            config['project'] = f"MonoDepth3-{orig_dataset}"  # Set project for wandb
    if dataset is not None:
        config['dataset'] = dataset
        config = {**DATASETS_CONFIG[dataset], **config}
    config['model'] = model_name
    typed_config = {k: infer_type(v) for k, v in config.items()}
    # BUGFIX: hostname was previously written to `config` *after* typed_config
    # had been built, so it never reached the returned dict. Set it on
    # typed_config so the returned config actually carries the hostname.
    typed_config['hostname'] = platform.node()
    return edict(typed_config)
6,430 | import json
import os
from dzoedepth.utils.easydict import EasyDict as edict
from dzoedepth.utils.arg_utils import infer_type
import pathlib
import platform
# Per-dataset paths and evaluation settings, keyed by the dataset identifier
# used by change_dataset / the data loaders. The *_crop flags select the
# evaluation crop; min/max_depth* bound the valid depth range.
# NOTE(review): HOME_DIR is not defined in this snippet -- presumably the
# user's home directory; verify where it is declared.
DATASETS_CONFIG = {
    "kitti": {
        "dataset": "kitti",
        "min_depth": 0.001,
        "max_depth": 80,
        "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
        "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
        "filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt",
        "input_height": 352,
        "input_width": 1216,  # 704
        "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
        "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
        "filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt",
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "do_random_rotate": True,
        "degree": 1.0,
        "do_kb_crop": True,
        "garg_crop": True,
        "eigen_crop": False,
        "use_right": False
    },
    # Same as "kitti" but with rotation augmentation disabled (test split).
    "kitti_test": {
        "dataset": "kitti",
        "min_depth": 0.001,
        "max_depth": 80,
        "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
        "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
        "filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt",
        "input_height": 352,
        "input_width": 1216,
        "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
        "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
        "filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt",
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "do_random_rotate": False,
        "degree": 1.0,
        "do_kb_crop": True,
        "garg_crop": True,
        "eigen_crop": False,
        "use_right": False
    },
    "nyu": {
        "dataset": "nyu",
        "avoid_boundary": False,
        "min_depth": 1e-3,   # originally 0.1
        "max_depth": 10,
        "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"),
        "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"),
        "filenames_file": "./train_test_inputs/nyudepthv2_train_files_with_gt.txt",
        "input_height": 480,
        "input_width": 640,
        "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"),
        "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"),
        "filenames_file_eval": "./train_test_inputs/nyudepthv2_test_files_with_gt.txt",
        "min_depth_eval": 1e-3,
        "max_depth_eval": 10,
        "min_depth_diff": -10,
        "max_depth_diff": 10,
        "do_random_rotate": True,
        "degree": 1.0,
        "do_kb_crop": False,
        "garg_crop": False,
        "eigen_crop": True
    },
    # The remaining entries are zero-shot evaluation-only datasets.
    "ibims": {
        "dataset": "ibims",
        "ibims_root": os.path.join(HOME_DIR, "shortcuts/datasets/ibims/ibims1_core_raw/"),
        "eigen_crop": True,
        "garg_crop": False,
        "do_kb_crop": False,
        "min_depth_eval": 0,
        "max_depth_eval": 10,
        "min_depth": 1e-3,
        "max_depth": 10
    },
    "sunrgbd": {
        "dataset": "sunrgbd",
        "sunrgbd_root": os.path.join(HOME_DIR, "shortcuts/datasets/SUNRGBD/test/"),
        "eigen_crop": True,
        "garg_crop": False,
        "do_kb_crop": False,
        "min_depth_eval": 0,
        "max_depth_eval": 8,
        "min_depth": 1e-3,
        "max_depth": 10
    },
    "diml_indoor": {
        "dataset": "diml_indoor",
        "diml_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_indoor_test/"),
        "eigen_crop": True,
        "garg_crop": False,
        "do_kb_crop": False,
        "min_depth_eval": 0,
        "max_depth_eval": 10,
        "min_depth": 1e-3,
        "max_depth": 10
    },
    "diml_outdoor": {
        "dataset": "diml_outdoor",
        "diml_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_outdoor_test/"),
        "eigen_crop": False,
        "garg_crop": True,
        "do_kb_crop": False,
        "min_depth_eval": 2,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 80
    },
    "diode_indoor": {
        "dataset": "diode_indoor",
        "diode_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_indoor/"),
        "eigen_crop": True,
        "garg_crop": False,
        "do_kb_crop": False,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 10,
        "min_depth": 1e-3,
        "max_depth": 10
    },
    "diode_outdoor": {
        "dataset": "diode_outdoor",
        "diode_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_outdoor/"),
        "eigen_crop": False,
        "garg_crop": True,
        "do_kb_crop": False,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 80
    },
    "hypersim_test": {
        "dataset": "hypersim_test",
        "hypersim_test_root": os.path.join(HOME_DIR, "shortcuts/datasets/hypersim_test/"),
        "eigen_crop": True,
        "garg_crop": False,
        "do_kb_crop": False,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 10
    },
    "vkitti": {
        "dataset": "vkitti",
        "vkitti_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti_test/"),
        "eigen_crop": False,
        "garg_crop": True,
        "do_kb_crop": True,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 80
    },
    "vkitti2": {
        "dataset": "vkitti2",
        "vkitti2_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti2/"),
        "eigen_crop": False,
        "garg_crop": True,
        "do_kb_crop": True,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 80,
    },
    "ddad": {
        "dataset": "ddad",
        "ddad_root": os.path.join(HOME_DIR, "shortcuts/datasets/ddad/ddad_val/"),
        "eigen_crop": False,
        "garg_crop": True,
        "do_kb_crop": True,
        "min_depth_eval": 1e-3,
        "max_depth_eval": 80,
        "min_depth": 1e-3,
        "max_depth": 80,
    },
}
def change_dataset(config, new_dataset):
    """Overwrite dataset-specific entries of ``config`` in place.

    Args:
        config (dict): config to update (mutated in place).
        new_dataset (str): key into DATASETS_CONFIG.

    Returns:
        dict: the same (mutated) config, for call chaining.
    """
    config.update(DATASETS_CONFIG[new_dataset])
    return config
6,431 | from scipy import ndimage
import base64
import math
import re
from io import BytesIO
import matplotlib
import matplotlib.cm
import numpy as np
import requests
import torch
import torch.distributed as dist
import torch.nn
import torch.nn as nn
import torch.utils.data.distributed
from PIL import Image
from torchvision.transforms import ToTensor
The provided code snippet includes necessary dependencies for implementing the `denormalize` function. Write a Python function `def denormalize(x)` to solve the following problem:
Reverses the imagenet normalization applied to the input. Args: x (torch.Tensor - shape(N,3,H,W)): input tensor Returns: torch.Tensor - shape(N,3,H,W): Denormalized input
Here is the function:
def denormalize(x):
    """Reverses the imagenet normalization applied to the input.

    Args:
        x (torch.Tensor - shape(N,3,H,W)): input tensor

    Returns:
        torch.Tensor - shape(N,3,H,W): Denormalized input
    """
    # Standard ImageNet channel statistics, broadcast over (N, 3, H, W).
    mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device)
    std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device)
    return x * std + mean
6,432 | from scipy import ndimage
import base64
import math
import re
from io import BytesIO
import matplotlib
import matplotlib.cm
import numpy as np
import requests
import torch
import torch.distributed as dist
import torch.nn
import torch.nn as nn
import torch.utils.data.distributed
from PIL import Image
from torchvision.transforms import ToTensor
def count_parameters(model, include_all=False):
    """Count the number of parameter elements in ``model``.

    Args:
        model (nn.Module): model whose parameters are counted.
        include_all (bool, optional): also count frozen
            (requires_grad=False) parameters. Defaults to False.

    Returns:
        int: total number of (trainable) parameter elements.
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad or include_all)
6,433 | from scipy import ndimage
import base64
import math
import re
from io import BytesIO
import matplotlib
import matplotlib.cm
import numpy as np
import requests
import torch
import torch.distributed as dist
import torch.nn
import torch.nn as nn
import torch.utils.data.distributed
from PIL import Image
from torchvision.transforms import ToTensor
def compute_errors(gt, pred):
    """Compute metrics for 'pred' compared to 'gt'

    Args:
        gt (numpy.ndarray): Ground truth values
        pred (numpy.ndarray): Predicted values

        gt.shape should be equal to pred.shape

    Returns:
        dict: Dictionary containing the following metrics:
            'a1': Delta1 accuracy: Fraction of pixels that are within a scale factor of 1.25
            'a2': Delta2 accuracy: Fraction of pixels that are within a scale factor of 1.25^2
            'a3': Delta3 accuracy: Fraction of pixels that are within a scale factor of 1.25^3
            'abs_rel': Absolute relative error
            'rmse': Root mean squared error
            'log_10': Absolute log10 error
            'sq_rel': Squared relative error
            'rmse_log': Root mean squared error on the log scale
            'silog': Scale invariant log error
    """
    # Threshold accuracies: ratio of prediction to ground truth (whichever is larger).
    ratio = np.maximum(gt / pred, pred / gt)
    a1 = np.mean(ratio < 1.25)
    a2 = np.mean(ratio < 1.25 ** 2)
    a3 = np.mean(ratio < 1.25 ** 3)
    # Relative and absolute errors in linear space.
    diff = gt - pred
    abs_rel = np.mean(np.abs(diff) / gt)
    sq_rel = np.mean(diff ** 2 / gt)
    rmse = np.sqrt(np.mean(diff ** 2))
    # Log-space errors.
    log_gt = np.log(gt)
    log_pred = np.log(pred)
    rmse_log = np.sqrt(np.mean((log_gt - log_pred) ** 2))
    log_err = log_pred - log_gt
    silog = np.sqrt(np.mean(log_err ** 2) - np.mean(log_err) ** 2) * 100
    log_10 = np.mean(np.abs(np.log10(gt) - np.log10(pred)))
    return dict(a1=a1, a2=a2, a3=a3, abs_rel=abs_rel, rmse=rmse, log_10=log_10, rmse_log=rmse_log,
                silog=silog, sq_rel=sq_rel)
The provided code snippet includes necessary dependencies for implementing the `compute_metrics` function. Write a Python function `def compute_metrics(gt, pred, interpolate=True, garg_crop=False, eigen_crop=True, dataset='nyu', min_depth_eval=0.1, max_depth_eval=10, **kwargs)` to solve the following problem:
Compute metrics of predicted depth maps. Applies cropping and masking as necessary or specified via arguments. Refer to compute_errors for more details on metrics.
Here is the function:
def compute_metrics(gt, pred, interpolate=True, garg_crop=False, eigen_crop=True, dataset='nyu', min_depth_eval=0.1, max_depth_eval=10, **kwargs):
    """Compute metrics of predicted depth maps. Applies cropping and masking as necessary or specified via arguments. Refer to compute_errors for more details on metrics.

    Args:
        gt (torch.Tensor): ground-truth depth map (squeezable to (h, w)).
        pred (torch.Tensor): predicted depth map; resized to gt's resolution
            when shapes differ and ``interpolate`` is True.
        interpolate (bool, optional): bilinearly resize pred to gt. Defaults to True.
        garg_crop (bool, optional): apply the Garg evaluation crop. Defaults to False.
        eigen_crop (bool, optional): apply the Eigen evaluation crop. Defaults to True.
        dataset (str, optional): 'kitti' selects the KITTI variant of the Eigen crop.
        min_depth_eval (float, optional): lower bound of the valid depth range.
        max_depth_eval (float, optional): upper bound of the valid depth range.
        **kwargs: if 'config' is given, crop flags and depth bounds are read from it.

    Returns:
        dict: metric name -> value, as produced by compute_errors.
    """
    if 'config' in kwargs:
        config = kwargs['config']
        garg_crop = config.garg_crop
        eigen_crop = config.eigen_crop
        min_depth_eval = config.min_depth_eval
        max_depth_eval = config.max_depth_eval
    if gt.shape[-2:] != pred.shape[-2:] and interpolate:
        pred = nn.functional.interpolate(
            pred, gt.shape[-2:], mode='bilinear', align_corners=True)
    pred = pred.squeeze().cpu().numpy()
    # Clamp predictions into the valid range; inf/nan map to the range bounds.
    pred[pred < min_depth_eval] = min_depth_eval
    pred[pred > max_depth_eval] = max_depth_eval
    pred[np.isinf(pred)] = max_depth_eval
    pred[np.isnan(pred)] = min_depth_eval
    gt_depth = gt.squeeze().cpu().numpy()
    # Only pixels whose ground truth lies strictly inside the range are evaluated.
    valid_mask = np.logical_and(
        gt_depth > min_depth_eval, gt_depth < max_depth_eval)
    if garg_crop or eigen_crop:
        gt_height, gt_width = gt_depth.shape
        eval_mask = np.zeros(valid_mask.shape)
        if garg_crop:
            # Garg crop: fixed fractions of the image size.
            eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height),
                      int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1
        elif eigen_crop:
            # print("-"*10, " EIGEN CROP ", "-"*10)
            if dataset == 'kitti':
                eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height),
                          int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1
            else:
                # assert gt_depth.shape == (480, 640), "Error: Eigen crop is currently only valid for (480, 640) images"
                # NYU-style fixed-pixel crop -- presumably assumes (480, 640)
                # inputs; verify for other resolutions.
                eval_mask[45:471, 41:601] = 1
    else:
        eval_mask = np.ones(valid_mask.shape)
    valid_mask = np.logical_and(valid_mask, eval_mask)
    return compute_errors(gt_depth[valid_mask], pred[valid_mask])
6,434 | from scipy import ndimage
import base64
import math
import re
from io import BytesIO
import matplotlib
import matplotlib.cm
import numpy as np
import requests
import torch
import torch.distributed as dist
import torch.nn
import torch.nn as nn
import torch.utils.data.distributed
from PIL import Image
from torchvision.transforms import ToTensor
def parallelize(config, model, find_unused_parameters=True):
    """Move ``model`` to the configured device(s) and wrap for multi-GPU use.

    Mirrors the config in three cases:
      * ``config.gpu`` set            -> pin and move to that single GPU
      * ``config.distributed`` True   -> SyncBatchNorm + DistributedDataParallel
      * ``config.gpu`` is None        -> DataParallel over all visible GPUs
    Mutates ``config`` (multigpu / rank / batch_size / workers) as a side effect.
    """
    if config.gpu is not None:
        torch.cuda.set_device(config.gpu)
        model = model.cuda(config.gpu)

    config.multigpu = False
    if config.distributed:
        # One process per GPU: recompute this process's global rank and join the group.
        config.multigpu = True
        config.rank = config.rank * config.ngpus_per_node + config.gpu
        dist.init_process_group(backend=config.dist_backend,
                                init_method=config.dist_url,
                                world_size=config.world_size,
                                rank=config.rank)
        # Split the global batch and the data-loading workers across this node's GPUs.
        config.batch_size = int(config.batch_size / config.ngpus_per_node)
        config.workers = int(
            (config.num_workers + config.ngpus_per_node - 1) / config.ngpus_per_node)
        print("Device", config.gpu, "Rank", config.rank, "batch size",
              config.batch_size, "Workers", config.workers)
        torch.cuda.set_device(config.gpu)
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = model.cuda(config.gpu)
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[config.gpu], output_device=config.gpu,
            find_unused_parameters=find_unused_parameters)
    elif config.gpu is None:
        # No specific GPU requested: plain single-process DataParallel.
        config.multigpu = True
        model = torch.nn.DataParallel(model.cuda())
    return model
6,435 | from scipy import ndimage
import base64
import math
import re
from io import BytesIO
import matplotlib
import matplotlib.cm
import numpy as np
import requests
import torch
import torch.distributed as dist
import torch.nn
import torch.nn as nn
import torch.utils.data.distributed
from PIL import Image
from torchvision.transforms import ToTensor
class colors:
    """ANSI escape codes for colored console output.

    Fix: ``printc`` referenced ``colors.reset``, which was never defined,
    so every call raised AttributeError. The escape-code constants are now
    defined on the class.
    """
    reset = '\033[0m'
    bold = '\033[01m'
    underline = '\033[04m'
    red = '\033[31m'
    green = '\033[32m'
    yellow = '\033[93m'
    blue = '\033[34m'
    purple = '\033[35m'
    cyan = '\033[36m'

    @staticmethod
    def printc(text, color):
        """Print ``text`` wrapped in ``color`` and reset the terminal style."""
        print(f"{color}{text}{colors.reset}")
6,436 | from scipy import ndimage
import base64
import math
import re
from io import BytesIO
import matplotlib
import matplotlib.cm
import numpy as np
import requests
import torch
import torch.distributed as dist
import torch.nn
import torch.nn as nn
import torch.utils.data.distributed
from PIL import Image
from torchvision.transforms import ToTensor
def get_image_from_url(url):
    """Download ``url`` and return it as an RGB PIL image.

    NOTE(review): the body was missing in the original (bare ``def`` line,
    a syntax error); reconstructed from the available imports — confirm
    against the upstream source.
    """
    response = requests.get(url, timeout=10)
    return Image.open(BytesIO(response.content)).convert("RGB")


def url_to_torch(url, size=(384, 384)):
    """Download an image and return a float CHW tensor scaled to [0, 1].

    Args:
        url: image URL.
        size: (width, height) to resize to.
    """
    img = get_image_from_url(url)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is its direct replacement.
    img = img.resize(size, Image.LANCZOS)
    img = torch.from_numpy(np.asarray(img)).float()
    img = img.permute(2, 0, 1)
    img.div_(255)
    return img
6,437 | from scipy import ndimage
import base64
import math
import re
from io import BytesIO
import matplotlib
import matplotlib.cm
import numpy as np
import requests
import torch
import torch.distributed as dist
import torch.nn
import torch.nn as nn
import torch.utils.data.distributed
from PIL import Image
from torchvision.transforms import ToTensor
def pil_to_batched_tensor(img):
    """Convert a PIL image into a batched (1, C, H, W) float tensor in [0, 1]."""
    tensor = ToTensor()(img)
    return tensor.unsqueeze(0)
6,438 | from scipy import ndimage
import base64
import math
import re
from io import BytesIO
import matplotlib
import matplotlib.cm
import numpy as np
import requests
import torch
import torch.distributed as dist
import torch.nn
import torch.nn as nn
import torch.utils.data.distributed
from PIL import Image
from torchvision.transforms import ToTensor
def save_raw_16bit(depth, fpath="raw.png"):
    """Save a depth map to disk as a 16-bit PNG (values are scaled by 256)."""
    if isinstance(depth, torch.Tensor):
        depth = depth.squeeze().cpu().numpy()

    assert isinstance(depth, np.ndarray), "Depth must be a torch tensor or numpy array"
    assert depth.ndim == 2, "Depth must be 2D"

    # Scale into 16-bit range before casting; x256 gives 1/256-unit precision.
    scaled = (depth * 256).astype(np.uint16)
    Image.fromarray(scaled).save(fpath)
    print("Saved raw depth to", fpath)
def infer_type(x):  # hacky way to infer type from string args
    """Best-effort conversion of a string to int, then float; non-strings and
    unconvertible strings are returned unchanged."""
    if not isinstance(x, str):
        return x
    for cast in (int, float):
        try:
            return cast(x)
        except ValueError:
            continue
    return x
def parse_unknown(unknown_args):
    """Parse leftover argparse tokens into a ``{name: value}`` dict.

    Accepts both ``--key=value`` and ``--key value`` forms; values are
    converted to int/float when possible via ``infer_type``.

    Fix: split only on the first '=' so values that themselves contain '='
    (e.g. ``--expr=a=b``) no longer raise a ValueError on unpacking.
    """
    clean = []
    for a in unknown_args:
        if "=" in a:
            k, v = a.split("=", 1)
            clean.extend([k, v])
        else:
            clean.append(a)

    keys = clean[::2]
    values = clean[1::2]
    return {k.replace("--", ""): infer_type(v) for k, v in zip(keys, values)}
6,440 | import math
from PIL import Image
import numpy
def zoom_in_effect(clip, zoom_ratio=0.04):
    """Return ``clip`` with a progressive center zoom applied frame by frame."""
    def effect(get_frame, t):
        frame = Image.fromarray(get_frame(t))
        base_size = frame.size

        # Grow the frame linearly with time; round sizes up to even values
        # so downstream video encoders get even dimensions.
        scale = 1 + zoom_ratio * t
        grown_w = math.ceil(base_size[0] * scale)
        grown_h = math.ceil(base_size[1] * scale)
        grown_w += grown_w % 2
        grown_h += grown_h % 2

        frame = frame.resize((grown_w, grown_h), Image.LANCZOS)

        # Crop the centered region and shrink back to the original size.
        off_x = math.ceil((grown_w - base_size[0]) / 2)
        off_y = math.ceil((grown_h - base_size[1]) / 2)
        frame = frame.crop(
            (off_x, off_y, grown_w - off_x, grown_h - off_y)
        ).resize(base_size, Image.LANCZOS)

        result = numpy.array(frame)
        frame.close()
        return result

    return clip.fl(effect)
6,441 | from generator.image.build import build_image_generator
from generator.video.build import build_video_generator
from generator.tts.build import build_tts_generator
from generator.text.build import build_text_generator
from generator.music.build import build_bgm_generator
from editor.chat_editor import Text2VideoEditor
from comm.mylog import logger
def build_image_generator(cfg):
def build_video_generator(cfg):
def build_tts_generator(cfg):
def build_text_generator(cfg):
def build_bgm_generator(cfg):
class Text2VideoEditor(object):
def __init__(self,
cfg,
text_generator,
vision_generator,
audio_generator,
bgm_generator,
) -> None:
def run(self,input_text,style="",out_file="test.mp4"):
logger = build_logger()
def build_editor(cfg):
    """Assemble a Text2VideoEditor from the generators selected in ``cfg``."""
    visual_gen_type = cfg.video_editor.visual_gen.type
    logger.info('visual_gen_type: {}'.format(visual_gen_type))

    # Image-producing modes share one builder; everything else is video-based.
    image_modes = ("image_by_retrieval", "image_by_diffusion",
                   "image_by_retrieval_then_diffusion")
    if visual_gen_type in image_modes:
        vision_generator = build_image_generator(cfg)
    else:
        vision_generator = build_video_generator(cfg)

    text_generator = build_text_generator(cfg)
    audio_generator = build_tts_generator(cfg)
    bgm_generator = build_bgm_generator(cfg)
    return Text2VideoEditor(cfg, text_generator, vision_generator,
                            audio_generator, bgm_generator)
6,442 | import logging
logger = build_logger()
def build_logger():
    """Configure root logging at INFO level and return this module's logger."""
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.INFO)
    return logging.getLogger(__name__)
6,443 | from bs4 import BeautifulSoup
import requests
import json
def get_paragraph_texts(url: str, timeout: float = 10):
    """Fetch ``url`` and return the text of every ``<p>`` element, in order.

    Args:
        url: page to download.
        timeout: seconds to wait for the HTTP response. The original call had
            no timeout and could hang forever on a stalled connection.

    Returns:
        list[str]: paragraph texts in document order.
    """
    html: str = requests.get(url, timeout=timeout).text
    soup = BeautifulSoup(html, "html.parser")
    return [p.get_text() for p in soup.findAll('p')]
6,444 | import openai
import os
import re
from comm.mylog import logger
from comm.url_parser import get_paragraph_texts
def is_all_chinese(strs):
    """Return True if every character lies in the CJK unified ideograph range
    (vacuously True for an empty string)."""
    return all('\u4e00' <= ch <= '\u9fa5' for ch in strs)
6,445 | import openai
import os
import re
from comm.mylog import logger
from comm.url_parser import get_paragraph_texts
def is_contains_chinese(strs):
    """Return True if at least one character lies in the CJK unified
    ideograph range."""
    return any('\u4e00' <= ch <= '\u9fa5' for ch in strs)
6,446 | import urllib
import urllib.request
import io
import traceback
import os
from PIL import Image
from typing import List
from comm.mylog import logger
def download_image(url):
    """Download ``url`` with a browser-like User-Agent and return the raw
    bytes wrapped in a BytesIO stream."""
    request = urllib.request.Request(
        url,
        data=None,
        headers={"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0"},
    )
    # Context manager guarantees the connection is released.
    with urllib.request.urlopen(request, timeout=10) as response:
        return io.BytesIO(response.read())
6,447 | import clip
from multilingual_clip import pt_multilingual_clip
import transformers
import torch
import numpy as np
def build_clip_model(model_name="ViT-L/14", device="cpu"):
    """Load an OpenAI CLIP model.

    Fix: the default model name was "Vit-L/14", which is not a valid CLIP
    identifier — ``clip.load`` expects "ViT-L/14" (capital T) and raises a
    RuntimeError otherwise.

    Returns:
        (model, preprocess, tokenizer) where the tokenizer truncates
        over-long inputs instead of raising.
    """
    model, preprocess = clip.load(model_name, device=device)
    return model, preprocess, lambda t: clip.tokenize(t, truncate=True)
6,448 | import clip
from multilingual_clip import pt_multilingual_clip
import transformers
import torch
import numpy as np
def build_mclip_model(model_name="M-CLIP/XLM-Roberta-Large-Vit-L-14", device="cpu"):
    """Load a multilingual CLIP text model.

    Returns (model, None, tokenizer_fn); the second slot is None to mirror
    the (model, preprocess, tokenizer) shape of ``build_clip_model``.
    """
    mclip = MClip(model_name, device)
    return mclip, None, mclip.get_tokenizer
6,449 | import clip
from multilingual_clip import pt_multilingual_clip
import transformers
import torch
import numpy as np
def test_mclip():
    """Smoke-test MClip text embedding on an English/Chinese sentence pair."""
    mclip = MClip("M-CLIP/XLM-Roberta-Large-Vit-L-14", "cpu")
    sentences = ["hello world", "你好"]
    embedding = mclip.get_text_embed(sentences)
    print(embedding.shape)
6,450 | from generator.comm.media_generator import MediaGeneratorBase
import urllib
import urllib.request
import io
import traceback
import os
from PIL import Image
from typing import List
from comm.mylog import logger
def download_video(url):
    """Download ``url`` with a browser-like User-Agent and return the raw
    bytes wrapped in a BytesIO stream (30 s timeout for larger payloads)."""
    request = urllib.request.Request(
        url,
        data=None,
        headers={"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0"},
    )
    # Context manager guarantees the connection is released.
    with urllib.request.urlopen(request, timeout=30) as response:
        return io.BytesIO(response.read())
6,454 | from yacs.config import CfgNode as CN
# Default configuration tree for the video editor, built with yacs.
#
# Fix: the original had a garbled line that merged three statements
# ('_C.video_editor.visual_gen.type = "image_by_retrieval" o_editor...'),
# leaving `_C.video_editor.visual_gen` undefined and crashing at import;
# the three statements are reconstructed below.
_C = CN()
_C.video_editor = CN()
_C.video_editor.type = "Text2Video"

# --- visual generation: retrieval / diffusion backends -----------------------
_C.video_editor.visual_gen = CN()
_C.video_editor.visual_gen.type = "image_by_retrieval"
_C.video_editor.visual_gen.image_by_retrieval = CN()
_C.video_editor.visual_gen.image_by_retrieval.model = "ViT-L/14"
_C.video_editor.visual_gen.image_by_retrieval.model_path = ""
_C.video_editor.visual_gen.image_by_retrieval.device = "cpu"
_C.video_editor.visual_gen.image_by_retrieval.index_path = ""
_C.video_editor.visual_gen.image_by_retrieval.meta_path = ""
_C.video_editor.visual_gen.video_by_retrieval = CN()
_C.video_editor.visual_gen.video_by_retrieval.model = "ViT-B/32"
_C.video_editor.visual_gen.video_by_retrieval.model_path = ""
_C.video_editor.visual_gen.video_by_retrieval.device = "cpu"
_C.video_editor.visual_gen.video_by_retrieval.index_path = ""
_C.video_editor.visual_gen.video_by_retrieval.meta_path = ""
_C.video_editor.visual_gen.image_by_diffusion = CN()
_C.video_editor.visual_gen.image_by_diffusion.model_id = "stabilityai/stable-diffusion-2-1"
_C.video_editor.visual_gen.image_by_retrieval_then_diffusion = CN()
_C.video_editor.visual_gen.image_by_retrieval_then_diffusion.model_id = "stabilityai/stable-diffusion-2-1"

# --- text generation ---------------------------------------------------------
_C.video_editor.text_gen = CN()
_C.video_editor.text_gen.type = "toy"
_C.video_editor.text_gen.organization = ""
_C.video_editor.text_gen.api_key = ""

# --- text-to-speech ----------------------------------------------------------
_C.video_editor.tts_gen = CN()
_C.video_editor.tts_gen.model = "PaddleSpeechTTS"
_C.video_editor.tts_gen.am = 'fastspeech2_mix'
_C.video_editor.tts_gen.lang = 'mix'

# --- subtitles and background music ------------------------------------------
_C.video_editor.subtitle = CN()
_C.video_editor.subtitle.font = ""
_C.video_editor.bgm_gen = CN()
_C.video_editor.bgm_gen.type = "toy"
The provided code snippet includes necessary dependencies for implementing the `get_cfg_defaults` function. Write a Python function `def get_cfg_defaults()` to solve the following problem:
Get a yacs CfgNode object with default values for my_project.
Here is the function:
def get_cfg_defaults():
    """Get a yacs CfgNode object with default values for my_project."""
    # Hand out a clone so callers can mutate their copy without touching the
    # module-level defaults ("local variable" use pattern).
    cfg = _C.clone()
    return cfg
6,455 | import gradio as gr
import argparse
import sys
import os
from editor.build import build_editor
from configs.config import get_cfg_defaults
from comm.mylog import logger
def get_args():
parser = argparse.ArgumentParser(description='config for open chat editor')
parser.add_argument('--cfg', type=str, required=True,help='input cfg file path')
parser.add_argument('--func', type=str,default='Text2VideoEditor',help='editor function name')
args = parser.parse_args()
return args | null |
6,456 | import gradio as gr
import argparse
import sys
import os
from editor.build import build_editor
from configs.config import get_cfg_defaults
from comm.mylog import logger
def run_Text2VideoEditor_ui():
gr.Interface(
run_Text2VideoEditor_logit,
[gr.inputs.Textbox(placeholder="Enter sentence here..."), gr.Radio(["realism style", "cartoon style"], label="video style", info="Please select a video style"),],
outputs = ['text',gr.Video()],
title='Text2VideoEditor',
allow_flagging="never",
).launch() | null |
6,457 | import gradio as gr
import argparse
import sys
import os
from editor.build import build_editor
from configs.config import get_cfg_defaults
from comm.mylog import logger
def run_URL2VideoEditor_ui():
gr.Interface(
run_URL2VideoEditor_logit,
[gr.inputs.Textbox(placeholder="Enter url here...",label='enter url'), gr.Radio(["realism style", "cartoon style"], label="video style", info="Please select a video style"),],
outputs = ['text',gr.Video()],
title='URL2VideoEditor',
allow_flagging="never",
).launch() | null |
6,458 | from pathlib import Path
from fastdup.sentry import v1_sentry_handler
from fastdup.engine import Fastdup
from typing import Union
import fastdup.fastdup_controller as FD
class Fastdup(FastdupController):
    """
    This class provides all fastdup capabilities as a single class.
    Usage example
    =============
    from fastdup.engine import Fastdup
    annotation_csv = '/path/to/annotation.csv'
    data_dir = '/path/to/images/'
    output_dir = '/path/to/fastdup_analysis'
    fd = Fastdup(work_dir=output_dir)
    fd.run(input_dir=data_dir, annotations=pd.read_csv(annotation_csv))
    df_sim = fd.similarity()
    im1_id, im2_id, sim = df_sim.iloc[0]
    annot_im1, annot_im2 = fd[im1_id], fd[im2_id]
    df_cc, cc_info = fd.connected_components()
    """
    def __init__(self, work_dir: Union[str, Path]=None, input_dir: Union[str, Path] = None):
        # State handling is delegated to FastdupController; a visualization
        # helper bound to this instance is attached on top.
        super().__init__(work_dir=work_dir, input_dir=input_dir)
        self.vis = FastdupVisualizer(self)
    def run(self,
            input_dir: Union[str, Path, list] = None,
            annotations: pd.DataFrame = None,
            embeddings=None,
            subset: list = None,
            data_type: str = 'image',
            overwrite: bool = False,
            model_path=None,
            distance='cosine',
            nearest_neighbors_k: int = 2,
            threshold: float = 0.9,
            outlier_percentile: float = 0.05,
            num_threads: int = None,
            num_images: int = None,
            verbose: bool = False,
            license: str = None,
            high_accuracy: bool = False,
            cc_threshold: float = 0.96,
            **kwargs):
        """
        :param input_dir: Location of the images/videos to analyze
            - A folder
            - A remote folder (s3 or minio starting with s3:// or minio://). When using minio append the minio
              server name for example minio://google/visual_db/sku110k
            - A file containing absolute filenames each on its own row TODO: add support for multiple folders
            - A file containing s3 full paths or minio paths each on its own row TODO: add support for multiple folders
            - A python list with absolute filenames
            - A python list with absolute folders, all images and videos on those folders are added recursively
            - yolo-v5 yaml input file containing train and test folders (single folder supported for now) TODO: add support for yolov5 yaml file
            - We support jpg, jpeg, tiff, tif, giff, heif, heic, bmp, png, mp4, avi.
              In addition we support tar, tar.gz, tgz and zip files containing images
              If you have other image extensions that are readable by opencv imread() you can give them in a file
              (each image on its own row) and then we do not check for the known extensions and use opencv
              to read those formats
            Note: It is not possible to mix compressed (videos or tars/zips) and regular images.
            Use the flag tar_only=True if you want to ignore images and run from compressed files
            Note: We assume image sizes should be larger or equal to 10x10 pixels.
            Smaller images (either on width or on height) will be ignored with a warning shown
            Note: It is possible to skip small images also by defining minimum allowed file size using
            min_file_size=1000 (in bytes)
            Note: For performance reasons it is always preferred to copy s3 images from s3 to local disk and then
            run fastdup on local disk. Since copying images from s3 in a loop is very slow, Alternatively you can
            use the flag sync_s3_to_local=True to copy ahead all images on the remote s3 bucket to disk
        :param annotations: Optional dataframe with annotations.
            annotation dataframe should have the following columns:
            - image_filename: {Mandatory}. Relative path to the image wrt to input_dir
            - split: (Optional). 'train' or 'test'
            - label: (Optional). Class of the image
            - row_y, col_x, height, width: (Optional). Bounding box of the object in the image
              if provided, fastdup will run on the bounding box instead of the whole image
            - x1, y1, x2, y2, x3, y3, x4, y4: (Optional). Bounding box of the object in the image
              if provided, and bounding_box=='rotated_bbox' fastdup will run on the rotated bounding box.
            - additional columns can be added and will be added to the output dataframe
        :param embeddings: list of embeddings, if given fastdup will be activated on the given embedding instead of the
            images. The embeddings should be in the same order as the images in the annotations dataframe.
        :param subset: List of images to run on. If None, run on all the images/bboxes.
        :param data_type: Type of data to run on. Supported types: 'image', 'bbox'. Default is 'image'.
        :param model_path: path to model for feature extraction. supported formats: onnx, ort.
            Make sure to update d parameter accordingly.
        :param distance: - distance metric for the Nearest Neighbors algorithm.
            The default is 'cosine' which works well in most cases. For nn_provider='nnf' the following distance metrics
            are supported. When using nnf_mode='Flat': 'cosine', 'euclidean', 'l1','linf','canberra',
            'braycurtis','jensenshannon' are supported. Otherwise 'cosine' and 'euclidean' are supported.
        :param num_images: Number of images to run on. On default, run on all the images in the image_dir folder.
        :param nearest_neighbors_k:
        :param high_accuracy: Compute a more accurate model. Runtime is increased about 15% and feature vector storage
            size/ memory is increased about 60%. The upside is the model can distinguish better of minute details in
            images with many objects.
        :param outlier_percentile: Percentile of the outlier score to use as threshold. Default is 0.5 (50%).
        :param threshold: Threshold to use for the graph generation. Default is 0.9.
        :param cc_threshold: Threshold to use for the graph connected component. Default is 0.96.
        :param bounding_box: yolov5s|face|rotated_bbox
            - yolov5s: Use yolov5s to detect objects in the image and run fastdup on each object.
            - face: Use face detection to detect faces in the image and run fastdup on each face.
            - rotated_bbox: Use the rotated bounding box given in the annotation data-frame to run fastdup on the object.
        :param num_threads: Number of threads. By default, autoconfigured by the number of cores.
        :param license: Optional license key. If not provided, only free features are available.
        :param overwrite: Optional flag to overwrite existing fastdup results.
        :param verbose: Verbosity.
        :param kwargs: Additional parameters for fastdup.
        :return:
            - d: Model Output dimension. Default is 576.
            - min_offset: Optional min offset to start iterating on the full file list.
            - max_offset: Optional max offset to start iterating on the full file list.
            - nnf_mode: When nn_provider='nnf' selects the nnf model mode. default is HSNW32. More accurate is Flat
            - nnf_param: When nn_provider='nnf' selects assigns optional parameters.
            - num_em_iter=XX: number of KMeans EM iterations to run. Default is 20.
            - num_clusters=XX: number of KMeans clusters to use. Default is 100.
            - batch_size = None,
            - resume: Optional flag to resume tar extraction from a previous run.
            - run_cc = Run connected components on the resulting similarity graph. Default is True.
            - run_sentry = Default is True.,
            - delete_tar = Delete tar after download from s3/minio.
            - delete_img = Delete images after download from s3/minio.
            - tar_only = When working with tar files obtained from cloud storage delete the tar after download
            - run_stats = When working with images obtained from cloud storage delete the image after download
            - sync_s3_to_local = In case of using s3 bucket sync s3 to local folder to improve performance.
              Assumes there is enough local disk space to contain the data. Default is False.
        """
        # TODO: Make sure work with s3 and minio is working
        # Fall back to the input_dir provided at construction time.
        input_dir = self._input_dir if input_dir is None else input_dir
        # Map the public argument names onto the parameter names expected by
        # FastdupController.run. NOTE(review): -1 / 0 / '' appear to be
        # "use engine default" sentinels — confirm against FastdupController.
        fastdup_func_params = dict(ccthreshold=cc_threshold,
                                   lower_threshold=outlier_percentile,
                                   distance=distance,
                                   nearest_neighbors_k=nearest_neighbors_k,
                                   threshold=threshold,
                                   num_threads=-1 if num_threads is None else num_threads,
                                   num_images=0 if num_images is None else num_images,
                                   verbose=verbose,
                                   license='' if license is None else license,
                                   high_accuracy=high_accuracy)
        if (model_path is not None):
            # Built-in model names have known output dimensions; any other
            # custom model must declare its output dimension via the 'd' kwarg.
            if 'dinov2s' not in model_path and 'dinov2b' not in model_path and 'resnet50' not in model_path and 'efficientnet' not in model_path and 'clip' not in model_path and 'clip336' not in model_path and 'clip14' not in model_path:
                assert 'd' in kwargs, 'Please provide d parameter to indicate the model output dimension'
            fastdup_func_params['model_path'] = model_path
        # Caller-supplied kwargs win over the defaults assembled above.
        fastdup_func_params.update(kwargs)
        return super().run(annotations=annotations, input_dir=input_dir, subset=subset, data_type=data_type,
                           overwrite=overwrite, embeddings=embeddings, **fastdup_func_params)
The provided code snippet includes necessary dependencies for implementing the `create` function. Write a Python function `def create(work_dir: Union[str, Path] = None, input_dir: Union[str, Path, list] = None) -> Fastdup` to solve the following problem:
Create fastdup analyzer instance. Usage example ============= ``` import pandas as pd import fastdup annotation_csv = '/path/to/annotation.csv' data_dir = '/path/to/images/' output_dir = '/path/to/fastdup_analysis' fd = fastdup.create(work_dir=output_dir, input_dir=data_dir) fd.run(annotations=pd.read_csv(annotation_csv)) df_sim = fd.similarity(data=False) im1_id, im2_id, sim = df_sim.iloc[0] annot_im1, annot_im2 = fd[im1_id], fd[im2_id] df_cc, cc_info = fd.connected_components() ```
Here is the function:
def create(work_dir: Union[str, Path] = None, input_dir: Union[str, Path, list] = None) -> Fastdup:
    """
    Create a fastdup analyzer instance.

    Usage example
    =============
    ```
    import pandas as pd
    import fastdup

    fd = fastdup.create(work_dir='/path/to/fastdup_analysis',
                        input_dir='/path/to/images/')
    fd.run(annotations=pd.read_csv('/path/to/annotation.csv'))

    df_sim = fd.similarity(data=False)
    im1_id, im2_id, sim = df_sim.iloc[0]
    annot_im1, annot_im2 = fd[im1_id], fd[im2_id]
    df_cc, cc_info = fd.connected_components()
    ```

    Args:
        work_dir: directory where fastdup writes its analysis artifacts.
        input_dir: image folder(s) to analyze.

    Returns:
        Fastdup: a configured analyzer instance.
    """
    return Fastdup(work_dir=work_dir, input_dir=input_dir)
6,459 | from fastdup import definitions
import os
from tqdm import tqdm
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
The provided code snippet includes necessary dependencies for implementing the `merge_webdataset_low_memory` function. Write a Python function `def merge_webdataset_low_memory(work_dir, test_dir='', num_images=None, num_images_test=None, merge_labels=False, merge_stats=False )` to solve the following problem:
Function to merge multiple image lists obtained from webdataset format by running fastdup with run_mode=1 into a single list. The following files will be created under work_dir: * atrain_features.dat.csv - list of all filenames * atrain_stats.csv - list of all image statistics (optional) * atrain_labels.csv - list of all labels (optional) Arguments: work_dir: fastdup working dir test_dir (str): test dir (optional) num_images (int): optional number of images in the work_dir to verify the number of images num_images_test (int): optional number of images in the test_dir to verify the number of images merge_labels (bool): if true, merges the label files merge_stats (bool): if true, merges the image statistics files Returns: None
Here is the function:
def merge_webdataset_low_memory(work_dir, test_dir='', num_images=None, num_images_test=None, merge_labels=False, merge_stats=False):
    """
    Merge the per-shard csv outputs of a fastdup webdataset run (run_mode=1)
    into single lists, written to the current working directory:
      * atrain_features.dat.csv - merged image list
      * atrain_labels.csv       - merged labels (when merge_labels=True)
      * atrain_stats.csv        - merged image statistics (when merge_stats=True)

    Rows are renumbered with one global counter so ids stay unique across
    shards and across work_dir/test_dir.

    Fixes over the previous version: shard files are always opened relative to
    the directory being listed; the merged output filename is always derived
    from the FILENAME_* constant (the header constant was used as a filename in
    the test-dir branches); label/stats header rows are skipped instead of
    falling through; the undefined HEADER_STATS name and the os.psth typo are
    gone; the test-dir stats merge only runs when a test_dir is given.

    Arguments:
        work_dir: fastdup working dir holding the per-shard csv files
        test_dir (str): optional test dir whose shards are appended after work_dir's
        num_images (int): optional expected number of train rows, asserted after merging
        num_images_test (int): optional expected number of test rows, asserted after merging
        merge_labels (bool): if true, merges the label files
        merge_stats (bool): if true, merges the image statistics files

    Returns:
        None
    """

    def _merge_rows(fa, src_dir, suffix, header, counter):
        # Append every data row from src_dir's per-shard '<suffix>' files,
        # rewriting the leading index column with the running counter.
        files = 0
        for name in tqdm(sorted(os.listdir(src_dir))):
            if not name.endswith(suffix) or name == 'atrain_' + suffix:
                continue
            files += 1
            with open(os.path.join(src_dir, name)) as f:
                for line in f:
                    line = line.strip()
                    if not line or line == header:
                        continue
                    pos = line.find(',')
                    if pos >= 1:
                        fa.write(f'{counter},{line[pos + 1:]}\n')
                        counter += 1
        print('Total files', files, 'total lines', counter)
        return counter

    def _merge_label_rows(fa, src_dir, counter):
        # Labels are consumed in lockstep with the matching features file so a
        # row is emitted for every feature vector; unreadable label lines are
        # written as N/A to keep the two lists aligned.
        files = 0
        for name in tqdm(sorted(os.listdir(src_dir))):
            if not name.endswith(FILENAME_LABELS) or name == 'atrain_' + FILENAME_LABELS:
                continue
            files += 1
            feat_name = name.replace('labels', 'features.dat')
            with open(os.path.join(src_dir, name), 'r', encoding='latin') as f, \
                    open(os.path.join(src_dir, feat_name)) as f1:
                line = f.readline()
                line0 = f1.readline()
                while line0 is not None and line0 != '':
                    try:
                        text = line.strip()
                        if text == LABEL_HEADER:
                            # Skip the shard's header row in both files.
                            line = f.readline()
                            line0 = f1.readline()
                            continue
                        pos = text.find(',')
                        if pos >= 1:
                            # Drop the old index column and strip commas from the label.
                            fa.write(f'{counter},{text[pos + 1:].replace(",", "")}\n')
                            counter += 1
                    except Exception:
                        fa.write(f'{counter},N/A\n')
                        counter += 1
                    line = f.readline()
                    line0 = f1.readline()
        print('Total files', files, 'total lines', counter)
        return counter

    print('Going to merge filenames')
    with open('atrain_' + FILENAME_IMAGE_LIST, 'w') as fa:
        fa.write(f"{IMAGELIST_HEADER}\n")
        counter = _merge_rows(fa, work_dir, FILENAME_IMAGE_LIST, IMAGELIST_HEADER, 0)
        if num_images:
            assert counter == num_images, "Wrong number of train images"
        if test_dir != '':
            counter = _merge_rows(fa, test_dir, FILENAME_IMAGE_LIST, IMAGELIST_HEADER, counter)
            if num_images and num_images_test:
                assert counter == num_images + num_images_test, "Wrong number of images"

    if merge_labels:
        print("Going to merge labels")
        with open('atrain_' + FILENAME_LABELS, 'w') as fa:
            fa.write(f"{LABEL_HEADER}\n")
            counter = _merge_label_rows(fa, work_dir, 0)
            if num_images:
                assert counter == num_images, "Wrong number of train labels"
            if test_dir != '':
                counter = _merge_label_rows(fa, test_dir, counter)
                if num_images and num_images_test:
                    assert counter == num_images + num_images_test, "Wrong number of labels"

    if merge_stats:
        print("Going to merge stats")
        with open('atrain_' + FILENAME_STATS, 'w') as fa:
            fa.write(f"{STATS_HEADER}\n")
            counter = _merge_rows(fa, work_dir, FILENAME_STATS, STATS_HEADER, 0)
            if num_images:
                assert counter == num_images, "Wrong number of train stats"
            if test_dir != '':
                counter = _merge_rows(fa, test_dir, FILENAME_STATS, STATS_HEADER, counter)
                if num_images and num_images_test:
                    assert counter == num_images + num_images_test, "Wrong number of stats"
6,460 | from fastdup import definitions
import os
from tqdm import tqdm
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
The provided code snippet includes necessary dependencies for implementing the `filter_similarity_low_memory` function. Write a Python function `def filter_similarity_low_memory(work_dir, out_file, threshold)` to solve the following problem:
Once fastdup runs and creates a similarity.csv file, select the subset of similarities > threshold and put them in the out_file Arguments: work_dir (str): fastdup working dir where the file similarity.csv is found, or a full path pointing to a similarity.csv file out_file (str): name of output similarity file threshold: 0->1, keep rows with distance > threshold Returns None
Here is the function:
def filter_similarity_low_memory(work_dir, out_file, threshold):
    """
    After one fastdup run created a similarity.csv file, select the subset of
    similarities with distance > threshold and write them to out_file.

    Arguments:
        work_dir (str): fastdup working dir where the file similarity.csv is found,
            or a full path pointing to a similarity.csv file
        out_file (str): name of the output similarity file
        threshold (float): 0->1, keep pairs whose distance is > threshold

    Returns:
        None
    """
    assert isinstance(threshold, float)
    assert threshold < 1 and threshold > 0
    sim_file = os.path.join(work_dir, FILENAME_SIMILARITY) if os.path.isdir(work_dir) else work_dir
    assert os.path.exists(sim_file)
    assert out_file != sim_file

    counter = 0
    # Stream line by line so memory stays constant even for huge similarity files.
    # Bug fix: the original wrote f"{SIMILARITY_HEADER, EADER}" (a tuple with an
    # undefined name) and never closed the input file.
    with open(out_file, 'w') as fa, open(sim_file, 'r') as f:
        fa.write(f"{SIMILARITY_HEADER}\n")
        for line in f:
            line = line.strip()
            if line == '' or line == SIMILARITY_HEADER:
                continue
            ret = line.split(',')
            if len(ret) == 3:
                distance = float(ret[2])
                if distance > threshold:
                    fa.write(line + "\n")
                    counter += 1
                    # Progress indicator for very large files.
                    if counter % 1000000 == 0:
                        print(counter)
6,461 | import os
import csv
import numpy as np
import pandas as pd
from PIL import Image
import cv2
from fastdup.image import my_resize
def register_embedding(embedding_tensor_name, meta_data_fname, log_dir, sprite_path, with_images=True):
    """Register an embedding tensor with the TensorBoard projector plugin.

    Args:
        embedding_tensor_name (str): name of the tensor holding the embeddings.
        meta_data_fname (str): labels/metadata file name, relative to log_dir.
        log_dir (str): TensorBoard log directory where the projector config is written.
        sprite_path (str): path to the sprite image; only its basename is recorded,
            so the sprite is expected to live inside log_dir.
        with_images (bool): when True, attach the sprite image to the embedding.
    """
    from tensorboard.plugins import projector
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = embedding_tensor_name
    embedding.metadata_path = meta_data_fname
    if (with_images):
        embedding.sprite.image_path = os.path.basename(sprite_path)
        # Each thumbnail tile in the sprite is IMAGE_SIZE x IMAGE_SIZE pixels
        # (IMAGE_SIZE is a module-level constant defined outside this view).
        embedding.sprite.single_image_dim.extend([IMAGE_SIZE, IMAGE_SIZE])
    projector.visualize_embeddings(log_dir, config)
def save_labels_tsv(labels, filepath, log_dir):
    """Write one label per line to the file log_dir/filepath (projector metadata)."""
    out_path = os.path.join(log_dir, filepath)
    with open(out_path, 'w') as out:
        out.writelines('{}\n'.format(lbl) for lbl in labels)
def generate_sprite_image(img_path, sample_size, log_dir, get_label_func = None, h = 0, w = 0, alternative_filename = None, alternative_width=None, max_width=None, kwargs={}):
    """Build a sprite (grid) image from a list of images, e.g. for the TensorBoard projector.

    Args:
        img_path (list): image file paths (str) or already-loaded BGR numpy arrays.
        sample_size (int): maximum number of images placed in the sprite.
        log_dir (str): output directory used when saving the sprite to disk.
        get_label_func: optional callable mapping an image to a label, or a list of labels.
        h, w (int): per-tile height/width in pixels; 0 means use the IMAGE_SIZE default.
        alternative_filename (str): optional explicit output path for the sprite image.
        alternative_width (int): optional fixed number of images per row.
        max_width (int): optional maximum sprite width in pixels; the sprite is rescaled to fit.
        kwargs (dict): may contain integers 'force_width'/'force_height' forcing the grid
            shape. (Mutable default is safe here: the dict is only read, never mutated.)

    Returns:
        (sprite, labels): sprite is the saved file path (str inputs) or a numpy RGB
        array (array inputs); labels has one entry per placed image.
    """
    # Generate sprite image
    images_pil = []
    labels = []
    H = IMAGE_SIZE if h == 0 else h
    W = IMAGE_SIZE if w == 0 else w
    # Scale tiles down so the sprite respects max_width (or a 320px default).
    if max_width is not None and h != 0 and w != 0:
        if W > max_width:
            scale = 1.0*W/max_width
            H = int(1.0*H/scale)
            W = int(1.0*W/scale)  # was int(1.0*w/scale); equivalent since W == w here
    else:
        if W > 320:
            scale = 1.0*W/320
            H = int(1.0*H/scale)
            W = int(1.0*W/scale)
    if alternative_width is not None:
        NUM_IMAGES_WIDTH = alternative_width
        # Bug fix: height was only assigned when alternative_width < sample_size,
        # leaving it unbound (UnboundLocalError below) otherwise. With at most
        # sample_size <= alternative_width images, a single row always suffices.
        height = 1
        if (alternative_width < sample_size):
            sample_size = alternative_width
    elif kwargs and 'force_width' in kwargs and 'force_height' in kwargs:
        assert isinstance(kwargs['force_width'], int), "force_width must be an integer"
        assert isinstance(kwargs['force_height'], int), "force_height must be an integer"
        if kwargs['force_width'] * kwargs['force_height'] > len(img_path):
            print(f"Warning: missing images for a full grid, requested {kwargs['force_width'] * kwargs['force_height']} got {len(img_path)}")
        NUM_IMAGES_WIDTH = kwargs['force_width']
        # Bug fix: the grid has force_width columns, so the row count must be
        # force_height (was mistakenly set to force_width).
        height = kwargs['force_height']
    else:
        # Default layout: roughly square grid, slightly wider than tall.
        NUM_IMAGES_WIDTH = int(1.4*np.ceil(np.sqrt(min(sample_size, len(img_path)))))
        divs = int(np.ceil(min(sample_size,len(img_path)) / NUM_IMAGES_WIDTH))
        height = min(divs, NUM_IMAGES_WIDTH)
    for i, im in enumerate(img_path[:sample_size]):
        # Load from disk (str) or resize an in-memory array; failures skip the image.
        if isinstance(im, str):
            try:
                assert os.path.exists(im)
                img_pil = cv2.imread(im)
                assert img_pil is not None, f"Failed to read image from {im}"
                img_pil = cv2.cvtColor(img_pil, cv2.COLOR_BGR2RGB)
                img_pil = cv2.resize(img_pil, (W, H))
            except Exception as ex:
                print("Failed to load image " + im)  # bug fix: missing space before path
                continue
        else:
            img_pil = cv2.resize(im, (W, H))
            img_pil = cv2.cvtColor(img_pil, cv2.COLOR_BGR2RGB)
        images_pil.append(Image.fromarray(img_pil))
        # Assuming your output data is directly the label
        if callable(get_label_func):
            label = get_label_func(im)
        elif isinstance(get_label_func, list):
            label = get_label_func[i]
        else:
            label = "N/A"
        labels.append(label)
    # Create a sprite image: paste tiles left-to-right, top-to-bottom.
    spriteimage = Image.new(
        mode='RGB',
        size=(W*NUM_IMAGES_WIDTH, H*height),
        color=(255,255,255)
    )
    for count, image in enumerate(images_pil):
        h_loc = count // NUM_IMAGES_WIDTH
        w_loc = count % NUM_IMAGES_WIDTH
        spriteimage.paste(image, (w_loc*W, h_loc*H))
    # Final downscale of the assembled sprite when a pixel budget was given.
    if max_width is not None:
        factor = max_width / spriteimage.width
        spriteimage = spriteimage.resize((int(spriteimage.width * factor), int(spriteimage.height * factor)))
    if isinstance(img_path[0], str):
        if alternative_filename is not None:
            SPRITE_PATH =alternative_filename
        else:
            SPRITE_PATH= f'{log_dir}/sprite.png'
        spriteimage.convert('RGB').save(SPRITE_PATH)
        return SPRITE_PATH, labels
    else:
        return np.array(spriteimage.convert('RGB')), labels
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def export_to_tensorboard_projector_inner(imglist, features, log_dir, sample_size,
                                          sample_method='random', with_images=True,
                                          get_label_func=None,d = 576):
    """Export a sample of image feature vectors to a TensorBoard projector checkpoint.

    Writes a sprite image, a labels TSV, a projector config and a TF1-style
    checkpoint into log_dir, then prints the tensorboard command to run.

    Args:
        imglist (list): image file paths, row-aligned with `features`.
        features: feature matrix; features.shape[1] must equal `d`.
        log_dir (str): output directory (created if missing).
        sample_size (int): number of rows to sample for visualization.
        sample_method (str): only 'random' is supported.
        with_images (bool): attach the sprite image to the embedding when True.
        get_label_func: optional callable or list used to label each sampled image.
        d (int): expected feature dimensionality.

    Returns:
        1 when log_dir cannot be created; otherwise None (results are written to disk).
    """
    try:
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
    except:
        print('Failed to create log_dir', log_dir)
        return 1
    df = pd.DataFrame({'filenames':imglist})
    if (sample_method == 'random'):
        sample = df.sample(min(sample_size, len(df))).reset_index()
    else:
        print('sample method', sample_method, 'is not supported')
        return
    EMBEDDINGS_TENSOR_NAME = 'embeddings'
    EMBEDDINGS_FPATH = os.path.join(log_dir, EMBEDDINGS_TENSOR_NAME + '.ckpt')
    STEP = 0
    img_path = list(sample['filenames'].values)
    # Build the sprite (thumbnail grid) and per-image labels for the projector UI.
    SPRITE_PATH, labels = generate_sprite_image(img_path, sample_size, log_dir, get_label_func)
    META_DATA_FNAME = 'meta.tsv' # Labels will be stored here
    register_embedding(EMBEDDINGS_TENSOR_NAME, META_DATA_FNAME, log_dir, SPRITE_PATH, with_images)
    save_labels_tsv(labels, META_DATA_FNAME, log_dir)
    # 'index' holds the original row positions preserved by reset_index(); use
    # them to pick the feature rows matching the sampled filenames.
    ids = sample['index'].values
    assert len(ids)
    assert features.shape[1] == d, "Wrong share for the feature vectors exected {} got {}".format(d, features.shape[1])
    import tensorflow as tf
    # TF1-compat Saver writes the checkpoint format the projector plugin reads.
    tensor_embeddings = tf.Variable(features[ids,:], name=EMBEDDINGS_TENSOR_NAME)
    saver = tf.compat.v1.train.Saver([tensor_embeddings])  # Must pass list or dict
    saver.save(sess=None, global_step=STEP, save_path=EMBEDDINGS_FPATH)
    print('Finish exporting to tensorboard projector, now run')
    # Inside Jupyter (JPY_PARENT_PID set) suggest the notebook magics instead.
    if 'JPY_PARENT_PID' in os.environ:
        print('%load_ext tensorboard')
        print(f'%tensorboard --logdir={log_dir}')
    else:
        print(f'tensorboard --logdir={log_dir}')
6,462 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
def fastdup_capture_exception(section, e, warn_only=False, extra=""):
    """Report exception `e` to Sentry, tagged with the code `section` it came from.

    Args:
        section (str): logical area of the code where the exception occurred.
        e (Exception): the exception instance to report.
        warn_only (bool): when False, also print the traceback to stderr.
        extra (str): optional extra tag value attached to the report.
    """
    if not warn_only:
        traceback.print_exc()
    # Users can opt out of telemetry entirely via the SENTRY_OPT_OUT env var.
    if 'SENTRY_OPT_OUT' not in os.environ:
        with sentry_sdk.push_scope() as scope:
            scope.set_tag("section", section)
            # NOTE(review): unit_test and token appear to be module-level globals
            # defined outside this view — confirm.
            scope.set_tag("unit_test", unit_test)
            scope.set_tag("token", token)
            scope.set_tag("platform", platform.platform())
            scope.set_tag("platform.version", platform.version())
            scope.set_tag("python", sys.version.strip().replace("\n", " "))
            scope.set_tag("production", "FASTDUP_PRODUCTION" in os.environ)
            if extra != "":
                scope.set_tag("extra", extra)
            capture_exception(e, scope=scope)
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def read_local_error_file(ret, local_error_file):
    """Surface the native engine's error/info log inside notebook environments.

    Args:
        ret (int): return code from the native fastdup run; non-zero means failure.
        local_error_file (str): path to the log file written by the C++ side.
    """
    # NOTE(review): due to operator precedence this fires either when the run
    # failed inside Jupyter, or ALWAYS on Colab (even on success, printed as
    # "info") — the data_type computation below suggests that is intentional,
    # but worth confirming.
    if (ret != 0 and 'JPY_PARENT_PID' in os.environ) or 'COLAB_JUPYTER_IP' in os.environ:
        if os.path.exists(local_error_file):
            # windows can generate non ascii printouts
            with open(local_error_file, "r", encoding="utf-8") as f:
                error = f.read()
            data_type = "error" if ret != 0 else "info"
            # Truncate to 5000 chars to keep notebook output manageable.
            print(f"fastdup C++ {data_type} received: ", error[:5000], "\n")
            if ret != 0:
                fastdup_capture_exception("C++ error", RuntimeError(error[:5000]))
6,463 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
import platform
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if platform.system() == "Windows":
is_windows = True
import struct
assert struct.calcsize("P") * 8 == 64, "Detected 32 bit windows, not supported, please run with 64 bits windows"
SO_SUFFIX=".dll"
so_file = os.path.join(LOCAL_DIR, 'fastdup_shared' + SO_SUFFIX)
# https://docs.sentry.io/platforms/native/configuration/backends/crashpad/
if os.path.exists(os.path.join(LOCAL_DIR, 'crashpad_handler.exe')):
os.environ['SENTRY_CRASHPAD'] = os.path.join(LOCAL_DIR, 'crashpad_handler.exe')
elif platform.system() == "Darwin":
SO_SUFFIX=".dylib"
# https://docs.sentry.io/platforms/native/configuration/backends/crashpad/
if os.path.exists(os.path.join(LOCAL_DIR, 'lib/crashpad_handler')):
os.environ['SENTRY_CRASHPAD'] = os.path.join(LOCAL_DIR, 'lib/crashpad_handler')
else:
print('Failed to find crashpad handler on ', os.path.join(LOCAL_DIR, 'lib/crashpad_handler'))
so_file = os.path.join(LOCAL_DIR, 'libfastdup_shared' + SO_SUFFIX)
else:
SO_SUFFIX=".so"
so_file = os.path.join(LOCAL_DIR, 'libfastdup_shared' + SO_SUFFIX)
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
try:
# this should be supported only from python3.8 and up
if platform.system() == "Windows":
os.add_dll_directory(LOCAL_DIR)
#os.add_dll_directory(LOCAL_DIR + "\\lib")
os.add_dll_directory(os.path.join(os.environ['SystemRoot'], 'System32'))
#os.add_dll_directory("C:\\Program Files\\PowerShell\\7")
dll = WinDLL(so_file)
else:
dll = CDLL(so_file)
except Exception as ex:
fastdup_capture_exception("__init__", ex)
print("Please reach out to fastdup support, it seems installation is missing critical files to start fastdup.")
print("We would love to understand what has gone wrong.")
print("You can open an issue here: " + GITHUB_URL + " or email us at " + CONTACT_EMAIL)
find_command = "\"find " + LOCAL_DIR + " \""
if platform.system() == "Windows":
find_command = "\"tree " + LOCAL_DIR + " \""
print("Share out output of the command " + find_command)
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
The provided code snippet includes necessary dependencies for implementing the `download_from_s3` function. Write a Python function `def download_from_s3(input_dir, work_dir, verbose, is_test=False)` to solve the following problem:
Download files from S3 to local disk (called only in case of turi_param='sync_s3_to_local=1') Note: we assume there is enough local disk space otherwise the download may fail input_dir: input directory on s3 or minio work_dir: local working directory verbose: if verbose show progress is_test: If this is a test folder save it on S3_TEST_TEMP_FOLDER otherwise on S3_TEMP_FOLDER Returns: The local download directory
Here is the function:
def download_from_s3(input_dir, work_dir, verbose, is_test=False):
    """
    Download files from S3 to local disk (called only in case of turi_param='sync_s3_to_local=1').
    Note: we assume there is enough local disk space, otherwise the download may fail.

    Args:
        input_dir (str): input directory on s3 ("s3://...") or minio ("minio://...")
        work_dir (str): local working directory
        verbose (bool): if verbose, show download progress
        is_test (bool): if this is a test folder, save it under S3_TEST_TEMP_FOLDER
            otherwise under S3_TEMP_FOLDER

    Returns:
        The local download directory on success, or a non-zero CLI return code on failure.
    """
    print(f'Going to download s3 files from {input_dir} to local {work_dir}')
    local_folder = S3_TEST_TEMP_FOLDER if is_test else S3_TEMP_FOLDER
    # Windows uses shorter, path-safe folder names.
    if platform.system() == "Windows":
        local_folder = 'testtemp' if is_test else 'temp'
    # NOTE(review): input_dir is interpolated into a shell command executed via
    # os.system; an attacker-controlled path could inject shell commands.
    # Consider subprocess.run with an argument list.
    if input_dir.startswith('s3://'):
        # Optional custom endpoint (e.g. S3-compatible storage) via env var.
        endpoint = "" if "FASTDUP_S3_ENDPOINT_URL" not in os.environ else f"--endpoint-url={os.environ['FASTDUP_S3_ENDPOINT_URL']}"
        command = f'aws s3 {endpoint} sync ' + input_dir + ' ' + f'{work_dir}/{local_folder}'
        if not verbose:
            command += ' --no-progress'
        ret = os.system(command)
        if ret != 0:
            print('Failed to sync s3 to local. Command was ' + command)
            return ret
    elif input_dir.startswith('minio://'):
        if platform.system() == "Windows":
            # On Windows the minio client location must be given explicitly.
            assert "FASTDUP_MC_PATH" in os.environ, "Have to define FASTUP_MC_PATH environment variable to point to minio client full_path. For example C:\\Users\\danny_bickson\\mc.exe"
            mc_path = os.environ["FASTDUP_MC_PATH"]
            assert os.path.exists(mc_path), "Failed to find minio client on " + mc_path
            command = f'{mc_path} cp --recursive ' + input_dir.replace('minio://', '') + ' ' + f'{work_dir}\\{local_folder}'
        else:
            command = 'mc cp --recursive ' + input_dir.replace('minio://', '') + ' ' + f'{work_dir}/{local_folder} '
        if not verbose:
            command += ' --quiet'
        ret = os.system(command)
        if ret != 0:
            print('Failed to sync s3 to local. Command was: ' + command)
            return ret
    input_dir = f'{work_dir}/{local_folder}'
    return input_dir
6,464 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
def fastdup_capture_exception(section, e, warn_only=False, extra=""):
    """Report exception `e` to Sentry tagged with `section`, honoring SENTRY_OPT_OUT."""
    if not warn_only:
        traceback.print_exc()
    if 'SENTRY_OPT_OUT' in os.environ:
        return
    with sentry_sdk.push_scope() as scope:
        tags = [
            ("section", section),
            ("unit_test", unit_test),
            ("token", token),
            ("platform", platform.platform()),
            ("platform.version", platform.version()),
            ("python", sys.version.strip().replace("\n", " ")),
            ("production", "FASTDUP_PRODUCTION" in os.environ),
        ]
        if extra != "":
            tags.append(("extra", extra))
        for key, value in tags:
            scope.set_tag(key, value)
        capture_exception(e, scope=scope)
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def check_latest_version(curversion):
    """Return True when the newest fastdup release on PyPI is more than 25
    minor versions ahead of `curversion`; False otherwise or on any failure."""
    try:
        if 'FASTDUP_PRODUCTION' in os.environ:
            return False
        import requests
        try:
            from packaging.version import parse
        except ModuleNotFoundError as ex:
            print("Failed to find packaging module, please install via `pip install setuptools`")
            fastdup_capture_exception("check_latest_version", ex, True)
            return False
        # Ask the PyPI JSON API for the newest published version.
        response = requests.get('https://pypi.org/pypi/fastdup/json', timeout=2)
        latest = parse(response.json()['info']['version'])
        # Fold major.minor into a single comparable score: major * 1000 + minor.
        latest_parts = str(latest).split(".")
        latest_score = int(latest_parts[0]) * 1000 + int(latest_parts[1])
        cur_parts = curversion.split(".")
        cur_score = int(cur_parts[0]) * 1000 + int(cur_parts[1])
        # Only nag when the gap exceeds 25 minor releases.
        if latest_score > cur_score + 25:
            return True
    except Exception as e:
        fastdup_capture_exception("check_latest_version", e, True)
    return False
6,465 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
def fastdup_capture_exception(section, e, warn_only=False, extra=""):
def record_time():
    """Persist today's date (YYYY-MM-DD) to /tmp/.timeinfo, best-effort.

    Failures are swallowed and only reported via fastdup_capture_exception.
    """
    try:
        now = datetime.now()
        date_time = now.strftime("%Y-%m-%d")
        with open("/tmp/.timeinfo", "w") as f:
            # Defensive strip of a trailing '%' — strftime("%Y-%m-%d") cannot
            # produce one, so this looks like leftover legacy handling; confirm.
            if date_time.endswith('%'):
                date_time = date_time[:len(date_time) - 1]
            f.write(date_time)
    except Exception as ex:
        fastdup_capture_exception("Timestamp", ex)
6,466 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
def get_images_from_path(path):
    """List a subfolder recursively and return all image/video files supported by fastdup.

    Args:
        path (str): directory to scan recursively.

    Returns:
        list: file paths whose extension is a supported image or video format;
        an empty list (with a printed warning) when none are found.
    """
    # create list to store results
    assert os.path.isdir(path), "Failed to find directory " + path
    # Bug fix: the original did `image_extensions = SUPPORTED_IMG_FORMATS` and
    # then .extend()-ed it, mutating the module-level list so every call grew
    # SUPPORTED_IMG_FORMATS with the video formats. Build a fresh list instead.
    image_extensions = list(SUPPORTED_IMG_FORMATS) + list(SUPPORTED_VID_FORMATS)
    ret = []
    for r in glob.glob(f'{path}/**/*', recursive=True):
        # os.path.splitext always returns a 2-tuple; [1] is '' when no extension.
        ext = os.path.splitext(r)[1]
        if ext in image_extensions:
            ret.append(r)
    if len(ret) == 0:
        print("Warning: failed to find any image/video files in folder " + path)
    return ret
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def list_subfolders_from_file(file_path):
    """Read folder paths (one per line) from file_path and return all supported
    image/video files found inside those folders."""
    assert os.path.isfile(file_path)
    collected = []
    with open(file_path, "r") as fh:
        for raw in fh:
            candidate = raw.strip()
            # Lines that are not existing directories are silently skipped.
            if os.path.isdir(candidate):
                collected += get_images_from_path(candidate)
    assert len(collected), "Failed to find any folder listing from file " + file_path
    return collected
6,467 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def check_if_folder_list(file_path):
    """Return True when the first line of file_path names an existing directory.

    A file ending in 'yaml' is never treated as a folder list, and an empty
    file returns False.
    """
    assert os.path.isfile(file_path), "Failed to find file " + file_path
    if file_path.endswith('yaml'):
        return False
    with open(file_path, "r") as fh:
        first = fh.readline()
        if first:
            return os.path.isdir(first.strip())
    return False
6,468 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
def save_as_csv_file_list(filenames, files_path):
    """Persist a list of filenames as a CSV with a single 'filename' column.

    Returns files_path so callers can chain on the written location.
    """
    import pandas as pd
    pd.DataFrame({'filename': filenames}).to_csv(files_path)
    return files_path
6,469 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
def get_images_from_path(path):
    """List a subfolder recursively and return all image/video files supported by fastdup.

    Args:
        path (str): directory to scan recursively.

    Returns:
        list: file paths whose extension is a supported image or video format;
        an empty list (with a printed warning) when none are found.
    """
    # create list to store results
    assert os.path.isdir(path), "Failed to find directory " + path
    # Bug fix: the original did `image_extensions = SUPPORTED_IMG_FORMATS` and
    # then .extend()-ed it, mutating the module-level list so every call grew
    # SUPPORTED_IMG_FORMATS with the video formats. Build a fresh list instead.
    image_extensions = list(SUPPORTED_IMG_FORMATS) + list(SUPPORTED_VID_FORMATS)
    ret = []
    for r in glob.glob(f'{path}/**/*', recursive=True):
        # os.path.splitext always returns a 2-tuple; [1] is '' when no extension.
        ext = os.path.splitext(r)[1]
        if ext in image_extensions:
            ret.append(r)
    if len(ret) == 0:
        print("Warning: failed to find any image/video files in folder " + path)
    return ret
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def expand_list_to_files(the_list):
    """Expand a mixed list of files, folders and remote (s3/minio) paths into a flat file list.

    Args:
        the_list (list): entries may be local files, local folders (expanded
            recursively via get_images_from_path), or s3://'minio:// paths that
            must point directly at supported image/video files.

    Returns:
        list: the expanded, non-empty list of file paths.
    """
    assert len(the_list), "Got an empty list for input"
    files = []
    for f in the_list:
        if isinstance(f, str) or isinstance(f, pathlib.PosixPath):
            f = str(f)
            if f.startswith("s3://") or f.startswith("minio://"):
                # Bug fix: os.path.splitext returns a (root, ext) tuple, so the
                # original membership test against the format lists was always
                # False; compare the extension itself. Also use `continue`
                # instead of `break` so the remaining list entries are still
                # processed after accepting a remote file.
                ext = os.path.splitext(f.lower())[1]
                if ext in SUPPORTED_IMG_FORMATS or ext in SUPPORTED_VID_FORMATS:
                    files.append(f)
                    continue
                assert False, f"Unsupported mode: can not run on lists of s3 folders, please list all image or video files " \
                              f"in s3 (using `aws s3 ls <bucket name>` into a text file, and run fastdup pointing to this text file. " \
                              f"File was {f}, supported image and video formats are {SUPPORTED_IMG_FORMATS}, {SUPPORTED_VID_FORMATS}"
            elif os.path.isfile(f):
                files.append(f)
            elif os.path.isdir(f):
                files.extend(get_images_from_path(f))
            else:
                assert False, f"Unknown file type encountered in list: {f}"
    assert len(files), "Failed to extract any files from list"
    return files
6,470 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def ls_crop_folder(path):
    """Assert that a crops folder exists and contains at least one entry."""
    assert os.path.isdir(path), "Failed to find directlry " + path
    import pandas as pd
    listing = pd.DataFrame({'filename': os.listdir(path)})
    assert len(listing), "Failed to find any crops in folder " + path
6,471 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
def merge_with_filenames_one_sided(df, filenames):
    """Inner-join df's 'from' id column against the 'index' column of filenames,
    attaching filename information to each similarity/outlier row."""
    merged = df.merge(filenames, left_on='from', right_on='index')
    assert len(merged), f"Failed to merge similarity/outliers with atrain_features.dat.csv file \n{df.head()}, \n{filenames.head()}"
    return merged
6,472 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
def merge_with_filenames_search(df, filenames):
    """Inner-join df's 'to' id column against the 'index' column of filenames,
    attaching filename information to each search-result row."""
    merged = df.merge(filenames, left_on='to', right_on='index')
    assert len(merged), f"Failed to merge similarity/outliers with atrain_features.dat.csv file \n{df.head()}, \n{filenames.head()}"
    return merged
6,473 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def s3_partial_sync(uri: str, work_dir: str, num_images: int, verbose: bool, check_interval: int, *args) -> str:
    """Sync files from S3 into work_dir/tmp, stopping early once num_images files arrived.

    Launches `aws s3 sync` as a subprocess and polls the destination folder every
    `check_interval` seconds, terminating the sync when enough files exist.

    Args:
        uri: s3:// source location.
        work_dir: existing local working directory; files land in work_dir/tmp,
            which must not already exist.
        num_images: stop after this many files have been downloaded.
        verbose: show aws progress output and echo the command line.
        check_interval: polling interval in seconds.
        *args: extra arguments appended to the aws CLI invocation.

    Returns:
        The local download directory (work_dir/tmp).
    """
    from tqdm import tqdm
    assert os.path.exists(work_dir)
    local_dir = os.path.join(work_dir, "tmp")
    # Refuse to reuse a stale tmp folder: leftover files would skew the count.
    if os.path.exists(local_dir):
        assert False, f"Error: found folder {local_dir}, please remove it and try again"
    if not os.path.exists(local_dir):
        os.mkdir(local_dir)
    assert os.path.exists(local_dir), "Failed to find work dir"
    if not verbose:
        arglist = ['aws', 's3', 'sync', uri, local_dir, '--quiet', *args]
    else:
        arglist = ['aws', 's3', 'sync', uri, local_dir, *args]
    if verbose:
        print('Going to run', arglist)
    process = subprocess.Popen(arglist)
    pbar = tqdm(desc='files', total=num_images)
    # Poll until the sync process exits or enough files have been downloaded.
    while process.poll() is None:
        time.sleep(check_interval)
        files = os.listdir(local_dir)
        # files = [f for f in files if (os.path.splitext(f.lower()) in SUPPORTED_IMG_FORMATS) or (os.path.splitext(f.lower()) in SUPPORTED_VID_FORMATS)]
        pbar.update(len(files) - pbar.n)
        if len(files) >= num_images:
            # Ask the sync to stop; escalate to kill if it ignores terminate().
            process.terminate()
            return_code = process.wait(5)
            if return_code != 0:
                process.kill()
            break
    return local_dir
6,474 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
The provided code snippet includes necessary dependencies for implementing the `convert_coco_dict_to_df` function. Write a Python function `def convert_coco_dict_to_df(coco_dict: dict, input_dir: str)` to solve the following problem:
Convert dictionary in COCO format object annotations to a Fastdup DF :param coco_dict: :return: a Dataframe in the expected format for fastdup bboxes.
Here is the function:
def convert_coco_dict_to_df(coco_dict: dict, input_dir: str):
    """
    Convert dictionary in COCO format object annotations to a Fastdup DF
    :param coco_dict:
    :return: a Dataframe in the expected format for fastdup bboxes.
    """
    assert "images" in coco_dict, f"Invalid coco format, expected 'images' field inside the dictionary, {str(coco_dict)[:250]}"
    assert "annotations" in coco_dict, f"Invalid coco format, expected 'annotations' field inside the dictionary {str(coco_dict)[:250]}"
    assert "categories" in coco_dict, f"Failed to find categories in dict {str(coco_dict)[:250]}"
    assert isinstance(input_dir, str) or isinstance(input_dir, pathlib.Path), f"input_dir should be a str pointing to the absolute path of image location, got {input_dir}"
    import pandas as pd

    # Attach each bounding-box annotation to its image record.
    images_df = pd.DataFrame(coco_dict['images']).rename(columns={'width': 'img_w', 'height': 'img_h'})
    annotations_df = pd.DataFrame(coco_dict['annotations'])
    merged = pd.merge(images_df, annotations_df, left_on='id', right_on='image_id')
    assert len(merged), f"Failed to merge coco dict {str(coco_dict)[:250]}"

    if 'rot_bb_view' in merged.columns:
        # Rotated boxes: four (x, y) corner coordinates per annotation.
        corner_records = list(merged['rot_bb_view'].apply(lambda x: {'x1': x[0][0], 'y1': x[1][0],
                                                                    'x2': x[0][1], 'y2': x[1][1],
                                                                    'x3': x[0][2], 'y3': x[1][2],
                                                                    'x4': x[0][3], 'y4': x[1][3]}).values)
        assert len(corner_records) == len(merged), f"Failed to find any bounding boxes {str(coco_dict)[:250]}"
        merged = pd.concat([merged, pd.DataFrame(corner_records)], axis=1)
        assert len(merged), f"Failed to add rotated cols {str(coco_dict)[:250]}"
    else:
        # Axis-aligned boxes: a COCO bbox is [x, y, width, height].
        box_records = list(merged['bbox'].apply(lambda x: {'col_x': x[0], 'row_y': x[1], 'width': x[2], 'height': x[3]}).values)
        assert len(box_records), f"Failed to find any bounding boxes {str(coco_dict)[:250]}"
        merged = pd.concat([merged, pd.DataFrame(box_records)], axis=1)
        assert len(merged), f"Failed to add bbox cols {str(coco_dict)[:250]}"

    # merge category id to extract the human-readable category name
    merged = merged.merge(pd.DataFrame(coco_dict['categories']), left_on='category_id', right_on='id')
    assert len(merged), f"Failed to merge coco dict with labels {str(coco_dict)[:250]}"
    merged = merged.rename(columns={'file_name': 'filename', 'name': 'label'})
    # those are the required fields needed by fastdup
    assert 'filename' in merged.columns, f"Failed to find columns in coco label dataframe {str(coco_dict)[:250]}"
    if 'col_x' in merged.columns:
        return merged[['filename', 'col_x', 'row_y', 'width', 'height', 'label']]
    assert 'label' in merged.columns, "When working with rotated bounding boxes, fastdup requires label column : <name>"
    return merged[['filename', 'x1', 'y1', 'x2', 'y2', 'x3', 'y3', 'x4', 'y4', 'label']]
6,475 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
def find_model(model_name, url):
def find_model_path(model_path, d):
    # Resolve a short model alias (case-insensitive) to a downloaded model file
    # and its embedding dimension.
    #   model_path: alias such as 'dinov2s', 'dinov2b', 'clip', 'clip336',
    #               'clip14', 'resnet50', 'efficientnet'; any other string is
    #               returned unchanged (presumably an explicit model path —
    #               TODO confirm against callers).
    #   d: default embedding dimension, overridden when an alias matches.
    # Returns: (resolved_model_path, embedding_dimension).
    if model_path.lower().startswith('dinov2') or model_path.lower() in ['efficientnet', 'resnet50', 'clip', 'clip336', 'clip14']:
        # use DINOv2s/DINOv2b to run with DINOv2 models,
        # case insensitive naming, e.g., dinov2s, DINOv2s, ...
        if model_path.lower() == 'dinov2s':
            # find_model downloads/locates the model file for the given URL
            model_path = find_model(model_path.lower(), DINOV2S_MODEL)
            d = DINOV2S_MODEL_DIM
        elif model_path.lower() == 'dinov2b':
            model_path = find_model(model_path.lower(), DINOV2B_MODEL)
            d = DINOV2B_MODEL_DIM
        elif model_path.lower() == 'clip':
            model_path = find_model(model_path.lower(), CLIP_MODEL)
            d = CLIP_MODEL_DIM
        elif model_path.lower() == 'clip336':
            model_path = find_model(model_path.lower(), CLIP_MODEL2)
            d = CLIP_MODEL2_DIM
        elif model_path.lower() == 'clip14':
            model_path = find_model(model_path.lower(), CLIP_MODEL14)
            d = CLIP_MODEL14_DIM
        elif model_path.lower() == "resnet50":
            model_path = find_model(model_path.lower(), RESNET50_MODEL)
            d = RESNET50_MODEL_DIM
        elif model_path.lower() == "efficientnet":
            model_path = find_model(model_path.lower(), EFFICIENTNET_MODEL)
            d = EFFICIENTNET_MODEL_DIM
        else:
            # guarded by the outer condition, so only reachable for a
            # 'dinov2*' prefix that is neither dinov2s nor dinov2b
            assert False, f"Supporting dinov2 models are dinov2s and dinov2b, got {model_path}"
    return model_path, d
6,476 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
def find_recently_created_files(directory, hours=10, suffixes=['.jpg', '.png', '.mp4']):
    """Split files under `directory` created within the last `hours` hours
    into image and video lists.

    Args:
        directory: root folder to scan (handed to returnfilelist).
        hours: look-back window in hours. BUGFIX: now defaults to 10 so that
            callers passing only a directory (e.g. select_by_date) no longer
            raise TypeError; the original comments already assumed 10 hours.
        suffixes: suffixes handed to returnfilelist when listing files.

    Returns:
        tuple: (recent_images, recent_videos) lists of file paths.
    """
    cutoff = datetime.datetime.now() - datetime.timedelta(hours=hours)
    recent_images = []
    recent_videos = []
    file_list = returnfilelist(directory, suffixes)
    for file_path in file_list:
        # os.path.getctime: inode-change time on Unix, creation time on Windows
        creation_time = datetime.datetime.fromtimestamp(os.path.getctime(file_path))
        if creation_time > cutoff:
            if file_path.endswith(('.png', '.jpg')):
                recent_images.append(file_path)
            # NOTE(review): '.avi' is matched here but is absent from the
            # default `suffixes`, so returnfilelist may never surface .avi
            # files unless the caller passes them explicitly — confirm.
            if file_path.endswith(('.mp4', '.avi')):
                recent_videos.append(file_path)
    return recent_images, recent_videos
def select_by_date(directory_to_search, work_dir, hours=10):
    """Find images/videos created in the last `hours` hours and write their
    paths to {work_dir}/images.csv and {work_dir}/videos.csv.

    Args:
        directory_to_search: root folder to scan.
        work_dir: directory where the two csv files are written.
        hours: look-back window in hours (new parameter, default 10 matches
            the "10 hours" intent of the original comments).

    Returns:
        tuple: (recent_images, recent_videos) lists of file paths.
    """
    # BUGFIX: the original called find_recently_created_files() without the
    # required `hours` argument, which raised TypeError at runtime.
    recent_images, recent_videos = find_recently_created_files(directory_to_search, hours)
    print("Recently created files:")
    print("videos", len(recent_videos))
    print("images", len(recent_images))
    import pandas as pd
    pd.DataFrame({'filename': recent_images}).to_csv(f'{work_dir}/images.csv', index=False)
    pd.DataFrame({'filename': recent_videos}).to_csv(f'{work_dir}/videos.csv', index=False)
    return recent_images, recent_videos
6,477 | import glob
import random
import platform
from fastdup.definitions import *
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import warnings
import itertools
import pathlib
import subprocess
import time
import os
import requests
import tqdm.auto as tqdm
import tarfile
from multiprocessing import Pool
import shutil
import datetime
def shorten_path(path):
    """Normalize a path string: expand a leading '~', drop a leading './' and
    a trailing '/', and strip the current working directory prefix so paths
    under the cwd become relative. Accepts str or pathlib.Path; returns str."""
    p = str(path) if isinstance(path, pathlib.Path) else path
    if p.startswith('~'):
        p = os.path.expanduser(p)
    elif p.startswith('./'):
        p = p[2:]
    if p.endswith('/'):
        p = p[:-1]
    cwd_prefix = os.getcwd() + '/'
    if p.startswith(cwd_prefix):
        p = p.replace(cwd_prefix, '')
    return p
# Default shard size: how many images are packed into each output tar.
images_per_tar = 10000


def process_group(group):
    """Pack one shard. `group` is a tuple (start_index, input_dir, output_dir,
    files); the shard number in the tar name is start_index // images_per_tar,
    zero-padded to six digits."""
    start_index, input_dir, output_dir, files = group
    shard_id = str(int(start_index / images_per_tar)).zfill(6)
    tar_filename = f"{output_dir}/visual_layer_{shard_id}.tar"
    create_tar(tar_filename, files, input_dir)
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
The provided code snippet includes necessary dependencies for implementing the `package_webdataset` function. Write a Python function `def package_webdataset(input_dir, work_dir, output_dir, _images_per_tar = 10000, image_suffix = ['.jpg', '.png'], num_threads=32, limit=None, exclude_dups=None, exclude_bad=None)` to solve the following problem:
Package a recursive folder of images as webdataset Args: input_dir: the input directory with images work_dir: temp working dir output_dir: the output dir for the packaged tars _images_per_tar: how many images per tar (default 10000) image_suffix: list of image suffixes to package, for example ['.jpg', '.png'] num_threads: (optional) number of threads to use for packaging limit: (optional) limit the number of tars created to limit exclude_dups: (optional) a pd.DataFrame of duplicates to remove or a csv file exclude_bad: (optional) a pd.DataFrame of corrupted images to remove or a csv file Returns:
Here is the function:
def package_webdataset(input_dir, work_dir, output_dir, _images_per_tar = 10000, image_suffix = ['.jpg', '.png'], num_threads=32, limit=None, exclude_dups=None, exclude_bad=None):
    """
    Package a recursive folder of images as webdataset tar shards.

    Args:
        input_dir: the input directory with images, or a .csv with a 'filename' column
        work_dir: temp working dir
        output_dir: the output dir for the packaged tars
        _images_per_tar: how many images per tar (default 10000)
        image_suffix: list of image suffixes to package, for example ['.jpg', '.png']
        num_threads: (optional) number of worker processes to use for packaging
        limit: (optional) limit the number of tars created
        exclude_dups: (optional) a pd.DataFrame of duplicates to remove or a csv file
        exclude_bad: (optional) a pd.DataFrame of corrupted images to remove or a csv file
    Returns:
        None. Tars and bookkeeping csvs are written into output_dir.
    """
    # BUGFIX: process_group() computes each shard's number from the
    # module-level images_per_tar; the original assigned a same-named local,
    # so non-default _images_per_tar produced wrong shard names. Declare it
    # global so both chunking and naming agree. NOTE(review): with the
    # 'spawn' start method workers re-import the module and still see the
    # default — confirm the fork start method is assumed here.
    global images_per_tar
    image_list_file = "image_list.txt"
    images_per_tar = _images_per_tar
    assert isinstance(image_suffix, list) and len(image_suffix)
    assert isinstance(_images_per_tar, int) and _images_per_tar > 0
    assert isinstance(input_dir, (str, pathlib.Path))
    assert isinstance(work_dir, (str, pathlib.Path))
    assert isinstance(output_dir, (str, pathlib.Path))
    input_dir = shorten_path(input_dir)
    work_dir = shorten_path(work_dir)
    output_dir = shorten_path(output_dir)
    if not os.path.exists(work_dir):
        os.makedirs(work_dir)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    assert os.path.exists(input_dir)
    import pandas as pd
    if not input_dir.endswith(".csv"):
        # NOTE(review): building shell commands from input_dir is unsafe for
        # untrusted paths (shell injection) — consider os.walk/glob instead.
        os.system(f'find {input_dir} -type f -name \'*{image_suffix[0]}\' > {work_dir}/{image_list_file}')
        for i in range(1, len(image_suffix)):
            os.system(f'find {input_dir} -type f -name \'*{image_suffix[i]}\' >> {work_dir}/{image_list_file}')
        # Read image file paths from the file, skipping a possible header row.
        # BUGFIX: the original compared the raw line (with '\n') to "filename",
        # which never matched.
        with open(f'{work_dir}/{image_list_file}', "r") as file:
            image_files = [line.strip() for line in file if line.strip() != "filename"]
        # Keep the file list next to the tars for provenance.
        os.system(f'mv {work_dir}/{image_list_file} {output_dir}/')
    else:
        image_files = pd.read_csv(input_dir)['filename'].values
        shutil.copy(input_dir, output_dir)
    print('Found a total', len(image_files), 'images to package')
    # BUGFIX: `if exclude_dups:` raises ValueError when a DataFrame is passed
    # (ambiguous truth value); test against None explicitly.
    if exclude_dups is not None:
        if isinstance(exclude_dups, str):
            shutil.copy(exclude_dups, output_dir)
            exclude_dups = pd.read_csv(exclude_dups)['filename'].values
        elif isinstance(exclude_dups, pd.DataFrame):
            exclude_dups.to_csv(f'{output_dir}/duplicates_removed.csv')
            exclude_dups = exclude_dups['filename'].values
        image_files = list(set(image_files) - set(exclude_dups))
        print('After dedup remained with', len(image_files), 'images to package')
    if exclude_bad is not None:
        if isinstance(exclude_bad, str):
            shutil.copy(exclude_bad, output_dir)
            exclude_bad = pd.read_csv(exclude_bad)['filename'].values
        elif isinstance(exclude_bad, pd.DataFrame):
            # BUGFIX: the original saved exclude_dups here instead of exclude_bad.
            exclude_bad.to_csv(f'{output_dir}/bad_files_removed.csv')
            exclude_bad = exclude_bad['filename'].values
        image_files = list(set(image_files) - set(exclude_bad))
        print('After corruption removal remained with', len(image_files), 'images to package')
    # Divide image files into fixed-size shards; each tuple is one work item
    # for process_group().
    groups = [(i, input_dir, output_dir, image_files[i:i + images_per_tar]) for i in range(0, len(image_files), images_per_tar)]
    if limit is not None:
        groups = groups[:limit]
    # Process each shard in parallel
    with Pool(num_threads) as pool:
        pool.map(process_group, groups)
    print("Finished packaging into", output_dir)
6,478 | from enum import Enum
import os
import sys
import tempfile
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def get_sep():
    """Return the platform-specific path component separator ('/' on POSIX,
    '\\' on Windows)."""
    separator = os.sep
    return separator
6,479 | import os
from fastdup.definitions import S3_TEMP_FOLDER, S3_TEST_TEMP_FOLDER
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
The provided code snippet includes necessary dependencies for implementing the `download_from_s3` function. Write a Python function `def download_from_s3(input_dir, work_dir, verbose, is_test=False)` to solve the following problem:
Download files from S3 to local disk (called only in case of turi_param='sync_s3_to_local=1') Note: we assume there is enough local disk space otherwise the download may fail input_dir: input directory on s3 or minio work_dir: local working directory verbose: if verbose show progress is_test: If this is a test folder save it on S3_TEST_TEMP_FOLDER otherwise on S3_TEMP_FOLDER Returns: The local download directory
Here is the function:
def download_from_s3(input_dir, work_dir, verbose, is_test=False):
    """
    Mirror an s3:// or minio:// folder to local disk (used when
    turi_param contains 'sync_s3_to_local=1').

    Note: enough local disk space is assumed, otherwise the download may fail.

    Args:
        input_dir: source folder, an s3:// or minio:// URI.
        work_dir: local working directory; files land under a temp subfolder.
        verbose: when False, pass the CLI tool's quiet/no-progress flag.
        is_test: store under S3_TEST_TEMP_FOLDER instead of S3_TEMP_FOLDER.

    Returns:
        The local download directory on success, or the non-zero shell exit
        code when the aws/mc command fails.
    """
    print(f'Going to download s3 files from {input_dir} to local {work_dir}')
    local_folder = S3_TEST_TEMP_FOLDER if is_test else S3_TEMP_FOLDER
    dest = f'{work_dir}/{local_folder}'
    if input_dir.startswith('s3://'):
        # Relies on the aws CLI being installed and configured.
        command = 'aws s3 sync ' + input_dir + ' ' + dest
        if not verbose:
            command += ' --no-progress'
        ret = os.system(command)
        if ret != 0:
            print('Failed to sync s3 to local. Command was aws s3 sync ' + input_dir + ' ' + dest)
            return ret
    elif input_dir.startswith('minio://'):
        # Relies on the minio 'mc' client being installed and configured.
        source = input_dir.replace('minio://', '')
        command = 'mc cp --recursive ' + source + ' ' + dest + ' '
        if not verbose:
            command += ' --quiet'
        ret = os.system(command)
        if ret != 0:
            print('Failed to sync s3 to local. Command was: mc cp --recursive ' + source + ' ' + dest)
            return ret
    input_dir = dest
    return input_dir
6,480 | import os
from fastdup.definitions import S3_TEMP_FOLDER, S3_TEST_TEMP_FOLDER
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
def fastdup_capture_exception(section, e, warn_only=False, extra=""):
    # Report an exception to Sentry (unless the user opted out) and, for hard
    # errors, also print the traceback locally.
    #   section: short tag naming the code area where the exception occurred.
    #   e: the exception instance to report.
    #   warn_only: when True, skip printing the traceback (the event is still sent).
    #   extra: optional extra tag value attached to the Sentry event.
    if not warn_only:
        traceback.print_exc()
    # Respect the SENTRY_OPT_OUT environment variable: when set, send nothing.
    if 'SENTRY_OPT_OUT' not in os.environ:
        with sentry_sdk.push_scope() as scope:
            scope.set_tag("section", section)
            # NOTE(review): unit_test and token appear to be module-level
            # globals set elsewhere — confirm they are defined before use.
            scope.set_tag("unit_test", unit_test)
            scope.set_tag("token", token)
            scope.set_tag("platform", platform.platform())
            scope.set_tag("platform.version", platform.version())
            scope.set_tag("python", sys.version.strip().replace("\n", " "))
            scope.set_tag("production", "FASTDUP_PRODUCTION" in os.environ)
            if extra != "":
                scope.set_tag("extra", extra)
            capture_exception(e, scope=scope)
def check_latest_version(curversion):
    # Best-effort check whether a meaningfully newer fastdup release exists on
    # PyPI. Returns True only when the published version exceeds `curversion`
    # by more than 0.010 (see the *1000 + 10 margin below); returns False
    # otherwise or on any error (offline, PyPI unreachable, unparsable version).
    try:
        import requests
        from packaging.version import parse
        # Search for the package on PyPI using the PyPI API
        response = requests.get('https://pypi.org/pypi/fastdup/json')
        # Get the latest version number from the API response
        latest_version = parse(response.json()['info']['version'])
        # NOTE(review): float(str(version)) only works for MAJOR.MINOR style
        # versions; a three-part version like '1.2.3' raises ValueError, which
        # is swallowed below and yields False — confirm fastdup versions are
        # always two-component.
        latest_version = (int)(float(str(latest_version))*1000)
        # Require a gap of more than 10/1000 so tiny patch bumps do not nag.
        if latest_version > (int)(float(curversion)*1000)+10:
            return True
    except Exception as e:
        # warn_only=True: version checking must never disturb the user
        fastdup_capture_exception("check_latest_version", e, True)
    return False
6,481 | import os
from fastdup.definitions import S3_TEMP_FOLDER, S3_TEST_TEMP_FOLDER
from datetime import datetime
from fastdup.sentry import fastdup_capture_exception
def fastdup_capture_exception(section, e, warn_only=False, extra=""):
    # Report an exception to Sentry (unless the user opted out) and, for hard
    # errors, also print the traceback locally.
    #   section: short tag naming the code area where the exception occurred.
    #   e: the exception instance to report.
    #   warn_only: when True, skip printing the traceback (the event is still sent).
    #   extra: optional extra tag value attached to the Sentry event.
    if not warn_only:
        traceback.print_exc()
    # Respect the SENTRY_OPT_OUT environment variable: when set, send nothing.
    if 'SENTRY_OPT_OUT' not in os.environ:
        with sentry_sdk.push_scope() as scope:
            scope.set_tag("section", section)
            # NOTE(review): unit_test and token appear to be module-level
            # globals set elsewhere — confirm they are defined before use.
            scope.set_tag("unit_test", unit_test)
            scope.set_tag("token", token)
            scope.set_tag("platform", platform.platform())
            scope.set_tag("platform.version", platform.version())
            scope.set_tag("python", sys.version.strip().replace("\n", " "))
            scope.set_tag("production", "FASTDUP_PRODUCTION" in os.environ)
            if extra != "":
                scope.set_tag("extra", extra)
            capture_exception(e, scope=scope)
def record_time():
    """Persist today's date (YYYY-MM-DD) to /tmp/.timeinfo, best-effort.

    Failures (e.g. unwritable /tmp) are reported via fastdup_capture_exception
    and never raised to the caller.
    """
    try:
        date_time = datetime.now().strftime("%Y-%m-%d")
        # BUGFIX(cleanup): the original stripped a trailing '%' from the
        # formatted date, but "%Y-%m-%d" can never produce one — dead branch removed.
        with open("/tmp/.timeinfo", "w") as f:
            f.write(date_time)
    except Exception as ex:
        fastdup_capture_exception("Timestamp", ex)
6,482 | import json
import os
import tempfile
import warnings
from typing import List, Union, Tuple
import numpy as np
import pandas as pd
from pathlib import Path
import fastdup
from pandas.errors import EmptyDataError
import shutil
import fastdup.definitions as FD
from fastdup.sentry import v1_sentry_handler, fastdup_capture_exception, fastdup_capture_log_debug_state
from fastdup.definitions import FOLDER_FULL_IMAGE_RUN
from fastdup.utils import convert_coco_dict_to_df, shorten_path
import pathlib
import re
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def is_fastdup_dir(work_dir):
    """Return True iff `work_dir` contains all three artifacts a fastdup run
    saves: the mapping csv, the NNF index, and the annotation pickle."""
    required = (FD.MAPPING_CSV, FD.FILENAME_NNF_INDEX, FD.ANNOT_PKL)
    return all(os.path.exists(Path(work_dir) / name) for name in required)
6,483 | import json
import os
import tempfile
import warnings
from typing import List, Union, Tuple
import numpy as np
import pandas as pd
from pathlib import Path
import fastdup
from pandas.errors import EmptyDataError
import shutil
import fastdup.definitions as FD
from fastdup.sentry import v1_sentry_handler, fastdup_capture_exception, fastdup_capture_log_debug_state
from fastdup.definitions import FOLDER_FULL_IMAGE_RUN
from fastdup.utils import convert_coco_dict_to_df, shorten_path
import pathlib
import re
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def clean_work_dir(work_dir):
    """Delete the fastdup state files from `work_dir` if present; files that
    do not exist are silently skipped."""
    state_files = (FD.MAPPING_CSV, FD.FILENAME_NNF_INDEX, FD.ANNOT_PKL,
                   FD.FEATURES_DATA, FD.FILENAME_ERROR_MSG)
    for name in state_files:
        target = str(Path(work_dir) / name)
        if os.path.isfile(target):
            os.remove(target)
6,484 | import json
import os
import tempfile
import warnings
from typing import List, Union, Tuple
import numpy as np
import pandas as pd
from pathlib import Path
import fastdup
from pandas.errors import EmptyDataError
import shutil
import fastdup.definitions as FD
from fastdup.sentry import v1_sentry_handler, fastdup_capture_exception, fastdup_capture_log_debug_state
from fastdup.definitions import FOLDER_FULL_IMAGE_RUN
from fastdup.utils import convert_coco_dict_to_df, shorten_path
import pathlib
import re
The provided code snippet includes necessary dependencies for implementing the `set_fastdup_kwargs` function. Write a Python function `def set_fastdup_kwargs(input_kwargs: dict) -> dict` to solve the following problem:
override default arguments in fastdup args with user input :param input_kwargs: input kwargs to the init function :return: updated dict
Here is the function:
def set_fastdup_kwargs(input_kwargs: dict) -> dict:
    """
    Override default fastdup arguments with user-supplied ones.

    Keys that belong to the core fastdup API pass through unchanged; keys
    that belong to the turi engine are encoded into one comma-separated
    'turi_param' string (booleans become 0/1 via each entry's 'map').

    :param input_kwargs: input kwargs to the init function
    :return: updated dict with a 'turi_param' entry
    """
    fastdup_params = {
        'input_dir', 'work_dir', 'test_dir', 'compute', 'verbose', 'num_threads', 'num_images', 'distance',
        'threshold', 'lower_threshold', 'model_path', 'license', 'version', 'nearest_neighbors_k', 'd', 'run_mode',
        'nn_provider', 'min_offset', 'max_offset', 'nnf_mode', 'nnf_param', 'bounding_box', 'batch_size', 'resume',
        'high_accuracy'
    }
    # Each turi entry: 'map' translates the user value (None = pass through),
    # 'default' is used when the key is absent from input_kwargs.
    turi_params = {
        'nnmodel': {'map': {'brute_force': 0, 'ball_tree': 1, 'lsh': 2}, 'default': 'brute_force'},
        'ccthreshold': {'map': None, 'default': 0.96},
        'run_cc': {'map': {True: 1, False: 0}, 'default': True},
        'run_sentry': {'map': {True: 1, False: 0}, 'default': True},
        'delete_tar': {'map': {True: 1, False: 0}, 'default': False},
        'delete_img': {'map': {True: 1, False: 0}, 'default': False},
        'tar_only': {'map': {True: 1, False: 0}, 'default': False},
        'run_stats': {'map': {True: 1, False: 0}, 'default': True},
        'run_stats_only': {'map': {True: 1, False: 0}, 'default': False},
        'run_advanced_stats': {'map': {True: 1, False: 0}, 'default': False},
        'sync_s3_to_local': {'map': {True: 1, False: 0}, 'default': False},
        'store_int': {'map': {True: 1, False: 0}, 'default': True},
        'shorten_filenames': {'map': {True: 1, False: 0}, 'default': False},
        'save_crops': {'map': {True: 1, False: 0}, 'default': False},
        'augmentation_horiz': {'map': None, 'default': 0.2},
        'augmentation_vert': {'map': None, 'default': 0.2},
        'augmentation_additive_margin': {'map': None, 'default': 0},
        'num_onnx_inter_threads': {'map': None, 'default': 0},
        'num_onnx_intra_threads': {'map': None, 'default': 0},
        'is_clip14_model': {'map': {True: 1, False: 0}, 'default': False},
        # disabled upstream:
        #'run_labels': {'map': {True: 1, False: 0}, 'default': True},
        #'run_read_filenames': {'map': {True: 1, False: 0}, 'default': True},
        #'min_file_size': {'map': None, 'default': 0},
        #'read_features_parallel': {'map': None, 'default': 0},
        #'test_correction_offset': {'map': None, 'default': 0},
        #'max_augmentations': {'map': None, 'default': 1},
        #'augmentation_type': {'map': None, 'default': 0},
        #'is_ultraface_model': {'map': {True: 1, False: 0}, 'default': False},
        #'is_yolo_model': {'map': {True: 1, False: 0}, 'default': False},
        'min_input_image_height': {'map': None, 'default': 10},
        'min_input_image_width': {'map': None, 'default': 10},
        'save_thumbnails': {'map': {True: 1, False: 0}, 'default': False},
        'find_regex': {'map': None, 'default': ""},
        'no_sort': {'map': {True: 1, False: 0}, 'default': False},
        'quiet': {'map': {True: 1, False: 0}, 'default': False},
        'fastdup_ocr_lang': {'map': None, 'default': "en"},
        'fastdup_ocr_no_crop': {'map': {True: 1, False: 0}, 'default': False}
    }
    # Reject the first key that belongs to neither parameter family.
    unknown = [k for k in input_kwargs if k not in fastdup_params and k not in turi_params]
    if unknown:
        raise ValueError(f'invalid argument {unknown[0]}, allowed fastdup params are {fastdup_params}, allowed turi_param values are {turi_params}')
    encoded = []
    for arg_name, param in turi_params.items():
        raw = input_kwargs.get(arg_name, param['default'])
        mapping = param['map']
        encoded.append(f'{arg_name}={raw if mapping is None else mapping[raw]}')
    fastdup_kwargs = {key: value for key, value in input_kwargs.items() if key in fastdup_params}
    fastdup_kwargs['turi_param'] = ','.join(encoded)
    return fastdup_kwargs
6,485 | import json
import os
import tempfile
import warnings
from typing import List, Union, Tuple
import numpy as np
import pandas as pd
from pathlib import Path
import fastdup
from pandas.errors import EmptyDataError
import shutil
import fastdup.definitions as FD
from fastdup.sentry import v1_sentry_handler, fastdup_capture_exception, fastdup_capture_log_debug_state
from fastdup.definitions import FOLDER_FULL_IMAGE_RUN
from fastdup.utils import convert_coco_dict_to_df, shorten_path
import pathlib
import re
def get_input_dir(_input_dir):
    """Cast user-supplied input dir(s) to pathlib.Path, leaving remote URIs
    (s3://, smb://, minio://) as plain strings. Accepts a single value or a
    list; a list is mapped element-wise."""
    def _cast(value):
        value = str(value)
        is_remote = (value.startswith('s3://') or value.startswith("smb://")
                     or value.startswith("minio://"))
        return value if is_remote else Path(value)
    if isinstance(_input_dir, list):
        return [_cast(item) for item in _input_dir]
    return _cast(_input_dir)
6,486 | import os
import shutil
import numpy as np
import pandas as pd
from PIL import Image
import tempfile
from pathlib import Path
import cv2
from fastdup.fastdup_controller import FastdupController
from webcolors import rgb_to_name
import fastdup.definitions as FD
import matplotlib.pyplot as plt
def gen_data(output_dir, n_valid_single_bbox, n_valid_double_bbox, n_duplicated_bbox,
             n_corrupted_image, n_no_image):
    """Generate synthetic bbox test images under `output_dir`.

    Produces five sample groups (single-bbox, double-bbox, duplicated-bbox
    pairs, corrupted images, annotations with no image) using unique random
    RGB colors, and returns one annotation DataFrame per group.

    Returns:
        tuple of pd.DataFrame: (single, double, duplicated, corrupted, no_image)
    """
    total_samples = (n_valid_single_bbox + n_valid_double_bbox + n_duplicated_bbox +
                     n_corrupted_image + n_no_image)
    valid_colors_single_bbox, valid_colors_double_bbox, corrupted_bbox, duplicated_bbox, no_image_bbox = \
        [], [], [], [], []
    # Draw unique random colors; 3 per sample is an upper bound (each item
    # below consumes at most 3 colors). Track seen colors in a set for O(1)
    # membership instead of the original O(n) list scan.
    colors = []
    seen = set()
    while len(colors) < total_samples * 3:
        color = tuple(np.random.choice(range(256), size=3))
        if color not in seen:
            seen.add(color)
            colors.append(color)
    color_idx = 0
    # --- single-bbox valid images (2 colors each: background + bbox) ---
    for bg_color, bbox_color in zip(colors[color_idx:color_idx + n_valid_single_bbox],
                                    colors[color_idx + n_valid_single_bbox:color_idx + 2 * n_valid_single_bbox]):
        bbox_x, bbox_y, bbox_h, bbox_w = np.random.randint(0, 180), np.random.randint(0, 180), \
                                         np.random.randint(10, 60), np.random.randint(10, 60)
        save_bbox_im(output_dir, bg_color, [bbox_color], [bbox_x], [bbox_y], [bbox_h], [bbox_w])
        valid_colors_single_bbox.append(get_df_dict([bg_color, bbox_color], bbox_color, bbox_x, bbox_y, bbox_h, bbox_w))
    color_idx += 2 * n_valid_single_bbox
    # --- double-bbox valid images (3 colors each) ---
    # BUGFIX: these slices previously used n_valid_single_bbox, generating the
    # wrong number of double-bbox samples whenever the two counts differed.
    for bg_color, bbox_color1, bbox_color2 in zip(
            colors[color_idx:color_idx + n_valid_double_bbox],
            colors[color_idx + n_valid_double_bbox:color_idx + 2 * n_valid_double_bbox],
            colors[color_idx + 2 * n_valid_double_bbox:color_idx + 3 * n_valid_double_bbox]):
        bbox_x_1, bbox_y_1, bbox_h_1, bbox_w_1 = np.random.randint(0, 50), np.random.randint(0, 50), \
                                                 np.random.randint(10, 60), np.random.randint(10, 60)
        bbox_x_2, bbox_y_2, bbox_h_2, bbox_w_2 = np.random.randint(120, 180), np.random.randint(120, 180), \
                                                 np.random.randint(10, 60), np.random.randint(10, 60)
        save_bbox_im(output_dir, bg_color, [bbox_color1, bbox_color2], [bbox_x_1, bbox_x_2],
                     [bbox_y_1, bbox_y_2], [bbox_h_1, bbox_h_2], [bbox_w_1, bbox_w_2])
        valid_colors_double_bbox.append(get_df_dict([bg_color, bbox_color1, bbox_color2], bbox_color1, bbox_x_1, bbox_y_1, bbox_h_1, bbox_w_1))
        valid_colors_double_bbox.append(get_df_dict([bg_color, bbox_color1, bbox_color2], bbox_color2, bbox_x_2, bbox_y_2, bbox_h_2, bbox_w_2))
    color_idx += 3 * n_valid_double_bbox
    # --- duplicated-bbox image pairs (3 colors each: 2 backgrounds + shared bbox) ---
    # BUGFIX: slices and the color_idx advance previously used
    # n_valid_single_bbox instead of n_duplicated_bbox.
    for bg_color1, bg_color2, bbox_color in zip(
            colors[color_idx:color_idx + n_duplicated_bbox],
            colors[color_idx + n_duplicated_bbox:color_idx + 2 * n_duplicated_bbox],
            colors[color_idx + 2 * n_duplicated_bbox:color_idx + 3 * n_duplicated_bbox]):
        bbox_x1, bbox_y1 = np.random.randint(0, 180), np.random.randint(0, 180)
        bbox_x2, bbox_y2 = np.random.randint(0, 180), np.random.randint(0, 180)
        bbox_h, bbox_w = np.random.randint(10, 60), np.random.randint(10, 60)
        save_bbox_im(output_dir, bg_color1, [bbox_color], [bbox_x1], [bbox_y1], [bbox_h], [bbox_w], suffix='_1_duplicate')
        save_bbox_im(output_dir, bg_color2, [bbox_color], [bbox_x2], [bbox_y2], [bbox_h], [bbox_w], suffix='_2_duplicate')
        duplicated_bbox.append(get_df_dict([bg_color1, bbox_color], bbox_color, bbox_x1, bbox_y1, bbox_h, bbox_w, suffix='_1_duplicate'))
        duplicated_bbox.append(get_df_dict([bg_color2, bbox_color], bbox_color, bbox_x2, bbox_y2, bbox_h, bbox_w, suffix='_2_duplicate'))
    color_idx += 3 * n_duplicated_bbox
    # --- corrupted images (1 color each) ---
    for color in colors[color_idx:color_idx + n_corrupted_image]:
        bbox_x, bbox_y, bbox_h, bbox_w = np.random.randint(0, 180), np.random.randint(0, 180), \
                                         np.random.randint(10, 60), np.random.randint(10, 60)
        create_corrupted_image(output_dir, [color])
        corrupted_bbox.append(get_df_dict([color], color, bbox_x, bbox_y, bbox_h, bbox_w, suffix='_corrupted'))
    color_idx += n_corrupted_image
    # --- annotations that reference no image on disk (1 color each) ---
    for color in colors[color_idx:color_idx + n_no_image]:
        bbox_x, bbox_y, bbox_h, bbox_w = np.random.randint(0, 180), np.random.randint(0, 180), \
                                         np.random.randint(10, 60), np.random.randint(10, 60)
        no_image_bbox.append(get_df_dict([color], color, bbox_x, bbox_y, bbox_h, bbox_w, suffix='_no_image'))
    color_idx += n_no_image
    return pd.DataFrame(valid_colors_single_bbox), pd.DataFrame(valid_colors_double_bbox), \
        pd.DataFrame(duplicated_bbox), pd.DataFrame(corrupted_bbox), pd.DataFrame(no_image_bbox)
def create_invalid_bbox_ims(output_dir):
    """Write images annotated with deliberately invalid bounding boxes and
    return the annotation rows as a DataFrame.

    A bbox is invalid when h or w < 10, x or y < 0, x or y >= 256, or the box
    extends past the 256-pixel image edge. Each image itself is saved with a
    fixed valid placeholder box (0, 0, 10, 10); only the recorded annotation
    carries the invalid coordinates.
    """
    color = tuple(np.random.choice(range(256), size=3))
    # invalid (x, y, h, w) combinations exercising every rejection rule
    invalid_bbox_args = [
        (-1, -1, -1, -1),
        (0, 0, 0, 0),
        (0, 0, 0, 9),
        (0, 0, 9, 0),
        (0, 0, 9, 9),
        (0, 0, 9, 10),
        (0, 0, 10, 9),
        (70, 70, 10, 9),
        (70, 70, 9, 10),
        (56, 56, 201, 90),
        (56, 56, 90, 201),
        (-1, 56, 90, 90),
        (56, -1, 90, 90),
        (256, 56, 90, 90),
        (56, 256, 90, 90),
    ]
    rows = []
    for idx, (bbox_x, bbox_y, bbox_h, bbox_w) in enumerate(invalid_bbox_args):
        save_bbox_im(output_dir, color, [color], [0], [0], [10], [10], suffix=f'_{idx}_invalid_bbox')
        rows.append(get_df_dict([color, color], color, bbox_x, bbox_y, bbox_h, bbox_w, suffix=f'_{idx}_invalid_bbox'))
    return pd.DataFrame(rows)
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def create_synthetic_data(target_dir, n_valid_single_bbox=50, n_valid_double_bbox=50, n_duplicated_bbox=21,
                          n_corrupted_image=22, n_no_image=23):
    """Build a full synthetic bbox dataset under `target_dir`.

    Returns (df_annot, df_invalid_bbox, df_single, df_double, df_duplicate,
    df_corrupted, df_no_image), where df_annot concatenates all other frames.
    """
    # The target directory is created but never wiped (the rmtree call was
    # disabled upstream), matching the original behavior.
    os.makedirs(target_dir, exist_ok=True)
    generated = gen_data(target_dir, n_valid_single_bbox, n_valid_double_bbox,
                         n_duplicated_bbox, n_corrupted_image, n_no_image)
    df_single, df_double, df_duplicate, df_corrupted, df_no_image = generated
    df_invalid_bbox = create_invalid_bbox_ims(target_dir)
    df_annot = pd.concat([df_single, df_double, df_duplicate, df_corrupted,
                          df_no_image, df_invalid_bbox])
    return df_annot, df_invalid_bbox, df_single, df_double, df_duplicate, df_corrupted, df_no_image
6,487 | from fastdup.sentry import fastdup_capture_exception
import os
# COCO object-detection label map: contiguous category id -> class name.
# Id 0 is the background class; ids 1-80 are the 80 standard COCO classes in
# the contiguous ordering used by YOLO-style label files (read_coco_labels
# below indexes this dict with the id parsed from each .txt label file).
cat = {0: u'__background__',
 1: u'person',
 2: u'bicycle',
 3: u'car',
 4: u'motorcycle',
 5: u'airplane',
 6: u'bus',
 7: u'train',
 8: u'truck',
 9: u'boat',
 10: u'traffic light',
 11: u'fire hydrant',
 12: u'stop sign',
 13: u'parking meter',
 14: u'bench',
 15: u'bird',
 16: u'cat',
 17: u'dog',
 18: u'horse',
 19: u'sheep',
 20: u'cow',
 21: u'elephant',
 22: u'bear',
 23: u'zebra',
 24: u'giraffe',
 25: u'backpack',
 26: u'umbrella',
 27: u'handbag',
 28: u'tie',
 29: u'suitcase',
 30: u'frisbee',
 31: u'skis',
 32: u'snowboard',
 33: u'sports ball',
 34: u'kite',
 35: u'baseball bat',
 36: u'baseball glove',
 37: u'skateboard',
 38: u'surfboard',
 39: u'tennis racket',
 40: u'bottle',
 41: u'wine glass',
 42: u'cup',
 43: u'fork',
 44: u'knife',
 45: u'spoon',
 46: u'bowl',
 47: u'banana',
 48: u'apple',
 49: u'sandwich',
 50: u'orange',
 51: u'broccoli',
 52: u'carrot',
 53: u'hot dog',
 54: u'pizza',
 55: u'donut',
 56: u'cake',
 57: u'chair',
 58: u'couch',
 59: u'potted plant',
 60: u'bed',
 61: u'dining table',
 62: u'toilet',
 63: u'tv',
 64: u'laptop',
 65: u'mouse',
 66: u'remote',
 67: u'keyboard',
 68: u'cell phone',
 69: u'microwave',
 70: u'oven',
 71: u'toaster',
 72: u'sink',
 73: u'refrigerator',
 74: u'book',
 75: u'clock',
 76: u'vase',
 77: u'scissors',
 78: u'teddy bear',
 79: u'hair drier',
 80: u'toothbrush'}
def fastdup_capture_exception(section, e, warn_only=False, extra=""):
    # Report an exception to Sentry (unless the user opted out) and, for hard
    # errors, also print the traceback locally.
    #   section: short tag naming the code area where the exception occurred.
    #   e: the exception instance to report.
    #   warn_only: when True, skip printing the traceback (the event is still sent).
    #   extra: optional extra tag value attached to the Sentry event.
    if not warn_only:
        traceback.print_exc()
    # Respect the SENTRY_OPT_OUT environment variable: when set, send nothing.
    if 'SENTRY_OPT_OUT' not in os.environ:
        with sentry_sdk.push_scope() as scope:
            scope.set_tag("section", section)
            # NOTE(review): unit_test and token appear to be module-level
            # globals set elsewhere — confirm they are defined before use.
            scope.set_tag("unit_test", unit_test)
            scope.set_tag("token", token)
            scope.set_tag("platform", platform.platform())
            scope.set_tag("platform.version", platform.version())
            scope.set_tag("python", sys.version.strip().replace("\n", " "))
            scope.set_tag("production", "FASTDUP_PRODUCTION" in os.environ)
            if extra != "":
                scope.set_tag("extra", extra)
            capture_exception(e, scope=scope)
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
if not os.path.exists(so_file):
print("Failed to find shared object", so_file);
print("Current init file is on", __file__);
sys.exit(1)
if not os.path.exists(model_path_full):
fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
print("Failed to find ort model on init " + __file__)
sys.exit(1)
def read_coco_labels(path):
    """Read YOLO-style .txt label files from `path` and map each corresponding
    image path (labels/ -> images/, .txt -> .jpg) to its category name from
    the module-level `cat` dict. Unreadable files are reported and skipped."""
    label_dict = {}
    for f in os.listdir(path):
        if not f.endswith('.txt'):
            continue
        try:
            with open(os.path.join(path, f)) as fh:
                line = fh.read()
            # skip empty label files
            if line.strip() == '':
                continue
            # first whitespace-separated token is the integer category id
            int_cat = int(line.split(' ')[0])
            image_path = os.path.join(path.replace('labels', 'images'), f).replace('.txt', '.jpg')
            label_dict[image_path] = cat[int_cat]
        except Exception as ex:
            fastdup_capture_exception(f'Failed to read coco file {os.path.join(path, f)}', ex)
    return label_dict
6,488 | import warnings
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `column_or_1d` function. Write a Python function `def column_or_1d(y, *, warn=False)` to solve the following problem:
Ravel column or 1d numpy array, else raises an error. Parameters ---------- y : array-like Input data. warn : bool, default=False To control display of warnings. Returns ------- y : ndarray Output data. Raises ------ ValueError If `y` is not a 1D array or a 2D array with a single row or column.
Here is the function:
def column_or_1d(y, *, warn=False):
    """Ravel column or 1d numpy array, else raises an error.
    Parameters
    ----------
    y : array-like
        Input data.
    warn : bool, default=False
        To control display of warnings. When True, a warning is emitted if a
        2-D column vector had to be ravelled to 1-D.
    Returns
    -------
    y : ndarray
        Output data.
    Raises
    ------
    ValueError
        If `y` is not a 1D array or a 2D array with a single row or column.
    """
    y = np.asarray(y)
    shape = np.shape(y)
    if len(shape) == 1:
        return np.ravel(y)
    if len(shape) == 2 and shape[1] == 1:
        # Bug fix: `warn` was previously accepted but ignored. Mirror the
        # sklearn behavior of warning when a column vector is silently
        # converted to a 1-D array.
        if warn:
            warnings.warn(
                "A column-vector y was passed when a 1d array was expected. "
                "Please change the shape of y to (n_samples, ), for example "
                "using ravel().",
                stacklevel=2,
            )
        return np.ravel(y)
    raise ValueError(
        "y should be a 1d array, got an array of shape {} instead.".format(shape)
    )
6,489 | import warnings
import numpy as np
def unique_labels(truth, pred):
    """Return the sorted unique labels appearing in *truth* or *pred*.

    Accepts any array-like (list, tuple, ndarray). The previous
    implementation called ``truth.copy()`` followed by ``.extend(pred)``,
    which raised AttributeError for numpy arrays; concatenating with numpy
    handles both. ``np.unique`` already returns its result sorted, so no
    extra sort is needed.
    """
    combined = np.concatenate([np.asarray(truth), np.asarray(pred)])
    return np.unique(combined)
def precision_recall_fscore_support(
    y_true,
    y_pred,
    *,
    beta=1.0,
    labels=None,
    pos_label=1,
    average=None,
    warn_for=("precision", "recall", "f-score"),
    sample_weight=None,
    zero_division="warn",
):
    """Compute precision, recall, F-measure and support for each class.
    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label a negative sample as
    positive.
    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.
    The F-beta score can be interpreted as a weighted harmonic mean of
    the precision and recall, where an F-beta score reaches its best
    value at 1 and worst score at 0.
    The F-beta score weights recall more than precision by a factor of
    ``beta``. ``beta == 1.0`` means recall and precision are equally important.
    The support is the number of occurrences of each class in ``y_true``.
    If ``pos_label is None`` and in binary classification, this function
    returns the average precision, recall and F-measure if ``average``
    is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    beta : float, default=1.0
        The strength of recall versus precision in the F-score.
    labels : array-like, default=None
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average. For multilabel targets,
        labels are column indices. By default, all labels in ``y_true`` and
        ``y_pred`` are used in sorted order.
    pos_label : str or int, default=1
        The class to report if ``average='binary'`` and the data is binary.
        If the data are multiclass or multilabel, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.
    average : {'binary', 'micro', 'macro', 'samples', 'weighted'}, \
            default=None
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:
        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).
    warn_for : tuple or set, for internal use
        This determines which warnings will be made in the case that this
        function is being used to return only one of its metrics.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    zero_division : "warn", 0 or 1, default="warn"
        Sets the value to return when there is a zero division:
           - recall: when there are no positive labels
           - precision: when there are no positive predictions
           - f-score: both
        If set to "warn", this acts as 0, but warnings are also raised.
    Returns
    -------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Precision score.
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        Recall score.
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
        F-beta score.
    support : None (if average is not None) or array of int, shape =\
        [n_unique_labels]
        The number of occurrences of each label in ``y_true``.
    Notes
    -----
    When ``true positive + false positive == 0``, precision is undefined.
    When ``true positive + false negative == 0``, recall is undefined.
    In such cases, by default the metric will be set to 0, as will f-score,
    and ``UndefinedMetricWarning`` will be raised. This behavior can be
    modified with ``zero_division``.
    References
    ----------
    .. [1] `Wikipedia entry for the Precision and recall
           <https://en.wikipedia.org/wiki/Precision_and_recall>`_.
    .. [2] `Wikipedia entry for the F1-score
           <https://en.wikipedia.org/wiki/F1_score>`_.
    .. [3] `Discriminative Methods for Multi-labeled Classification Advances
           in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
           Godbole, Sunita Sarawagi
           <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import precision_recall_fscore_support
    >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
    >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
    >>> precision_recall_fscore_support(y_true, y_pred, average='macro')
    (0.22..., 0.33..., 0.26..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='micro')
    (0.33..., 0.33..., 0.33..., None)
    >>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
    (0.22..., 0.33..., 0.26..., None)
    It is possible to compute per-label precisions, recalls, F1-scores and
    supports instead of averaging:
    >>> precision_recall_fscore_support(y_true, y_pred, average=None,
    ... labels=['pig', 'dog', 'cat'])
    (array([0.        , 0.        , 0.66...]),
     array([0., 0., 1.]), array([0. , 0. , 0.8]),
     array([2, 2, 2]))
    """
    # NOTE(review): the upstream sklearn input validation is commented out in
    # this vendored copy -- beta < 0 and bad zero_division values are not
    # rejected here.
    #_check_zero_division(zero_division)
    #if beta < 0:
    #    raise ValueError("beta should be >=0 in the F-beta score")
    # _check_set_wise_labels / multilabel_confusion_matrix / _prf_divide are
    # helpers defined elsewhere in this module (sklearn-derived).
    labels = _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
    # Calculate tp_sum, pred_sum, true_sum ###
    samplewise = average == "samples"
    MCM = multilabel_confusion_matrix(
        y_true,
        y_pred,
        sample_weight=sample_weight,
        labels=labels,
        samplewise=samplewise,
    )
    # Each MCM[i] is a 2x2 confusion matrix for label i:
    # [1, 1] = TP, [0, 1] = FP, [1, 0] = FN.
    tp_sum = MCM[:, 1, 1]
    pred_sum = tp_sum + MCM[:, 0, 1]
    true_sum = tp_sum + MCM[:, 1, 0]
    # Micro-averaging pools the counts over all labels before dividing.
    if average == "micro":
        tp_sum = np.array([tp_sum.sum()])
        pred_sum = np.array([pred_sum.sum()])
        true_sum = np.array([true_sum.sum()])
    # Finally, we have all our sufficient statistics. Divide! #
    beta2 = beta**2
    # Divide, and on zero-division, set scores and/or warn according to
    # zero_division:
    precision = _prf_divide(
        tp_sum, pred_sum, "precision", "predicted", average, warn_for, zero_division
    )
    recall = _prf_divide(
        tp_sum, true_sum, "recall", "true", average, warn_for, zero_division
    )
    # warn for f-score only if zero_division is warn, it is in warn_for
    # and BOTH prec and rec are ill-defined
    # if zero_division == "warn" and ("f-score",) == warn_for:
    #     if (pred_sum[true_sum == 0] == 0).any():
    #         _warn_prf(average, "true nor predicted", "F-score is", len(true_sum))
    # if tp == 0 F will be 1 only if all predictions are zero, all labels are
    # zero, and zero_division=1. In all other case, 0
    # As beta -> inf, F-beta converges to recall.
    if np.isposinf(beta):
        f_score = recall
    else:
        denom = beta2 * precision + recall
        denom[denom == 0.0] = 1  # avoid division by 0
        f_score = (1 + beta2) * precision * recall / denom
    # Average the results
    if average == "weighted":
        weights = true_sum
        # No true samples at all: the weighted average is dictated entirely
        # by the zero_division policy, so return early.
        if weights.sum() == 0:
            zero_division_value = np.float64(1.0)
            if zero_division in ["warn", 0]:
                zero_division_value = np.float64(0.0)
            # precision is zero_division if there are no positive predictions
            # recall is zero_division if there are no positive labels
            # fscore is zero_division if all labels AND predictions are
            # negative
            if pred_sum.sum() == 0:
                return (
                    zero_division_value,
                    zero_division_value,
                    zero_division_value,
                    None,
                )
            else:
                return (np.float64(0.0), zero_division_value, np.float64(0.0), None)
    elif average == "samples":
        weights = sample_weight
    else:
        weights = None
    if average is not None:
        assert average != "binary" or len(precision) == 1
        precision = np.average(precision, weights=weights)
        recall = np.average(recall, weights=weights)
        f_score = np.average(f_score, weights=weights)
        true_sum = None  # return no support
    return precision, recall, f_score, true_sum
The provided code snippet includes necessary dependencies for implementing the `classification_report` function. Write a Python function `def classification_report( y_true, y_pred, *, labels=None, target_names=None, sample_weight=None, digits=2, output_dict=False, zero_division="warn", )` to solve the following problem:
Build a text report showing the main classification metrics. Read more in the :ref:`User Guide <classification_report>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array-like of shape (n_labels,), default=None Optional list of label indices to include in the report. target_names : list of str of shape (n_labels,), default=None Optional display names matching the labels (same order). sample_weight : array-like of shape (n_samples,), default=None Sample weights. digits : int, default=2 Number of digits for formatting output floating point values. When ``output_dict`` is ``True``, this will be ignored and the returned values will not be rounded. output_dict : bool, default=False If True, return output as dict. .. versionadded:: 0.20 zero_division : "warn", 0 or 1, default="warn" Sets the value to return when there is a zero division. If set to "warn", this acts as 0, but warnings are also raised. Returns ------- report : str or dict Text summary of the precision, recall, F1 score for each class. Dictionary returned if output_dict is True. Dictionary has the following structure:: {'label 1': {'precision':0.5, 'recall':1.0, 'f1-score':0.67, 'support':1}, 'label 2': { ... }, ... } The reported averages include macro average (averaging the unweighted mean per label), weighted average (averaging the support-weighted mean per label), and sample average (only for multilabel classification). Micro average (averaging the total true positives, false negatives and false positives) is only shown for multi-label or multi-class with a subset of classes, because it corresponds to accuracy otherwise and would be the same for all metrics. See also :func:`precision_recall_fscore_support` for more details on averages. 
Note that in binary classification, recall of the positive class is also known as "sensitivity"; recall of the negative class is "specificity". See Also -------- precision_recall_fscore_support: Compute precision, recall, F-measure and support for each class. confusion_matrix: Compute confusion matrix to evaluate the accuracy of a classification. multilabel_confusion_matrix: Compute a confusion matrix for each class or sample. Examples -------- >>> from sklearn.metrics import classification_report >>> y_true = [0, 1, 2, 2, 2] >>> y_pred = [0, 0, 2, 2, 1] >>> target_names = ['class 0', 'class 1', 'class 2'] >>> print(classification_report(y_true, y_pred, target_names=target_names)) precision recall f1-score support <BLANKLINE> class 0 0.50 1.00 0.67 1 class 1 0.00 0.00 0.00 1 class 2 1.00 0.67 0.80 3 <BLANKLINE> accuracy 0.60 5 macro avg 0.50 0.56 0.49 5 weighted avg 0.70 0.60 0.61 5 <BLANKLINE> >>> y_pred = [1, 1, 0] >>> y_true = [1, 1, 1] >>> print(classification_report(y_true, y_pred, labels=[1, 2, 3])) precision recall f1-score support <BLANKLINE> 1 1.00 0.67 0.80 3 2 0.00 0.00 0.00 0 3 0.00 0.00 0.00 0 <BLANKLINE> micro avg 1.00 0.67 0.80 3 macro avg 0.33 0.22 0.27 3 weighted avg 1.00 0.67 0.80 3 <BLANKLINE>
Here is the function:
def classification_report(
    y_true,
    y_pred,
    *,
    labels=None,
    target_names=None,
    sample_weight=None,
    digits=2,
    output_dict=False,
    zero_division="warn",
):
    """Build a text report showing the main classification metrics.
    Read more in the :ref:`User Guide <classification_report>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : array-like of shape (n_labels,), default=None
        Optional list of label indices to include in the report.
    target_names : list of str of shape (n_labels,), default=None
        Optional display names matching the labels (same order).
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    digits : int, default=2
        Number of digits for formatting output floating point values.
        When ``output_dict`` is ``True``, this will be ignored and the
        returned values will not be rounded.
    output_dict : bool, default=False
        If True, return output as dict.
        .. versionadded:: 0.20
    zero_division : "warn", 0 or 1, default="warn"
        Sets the value to return when there is a zero division. If set to
        "warn", this acts as 0, but warnings are also raised.
    Returns
    -------
    report : str or dict
        Text summary of the precision, recall, F1 score for each class.
        Dictionary returned if output_dict is True. Dictionary has the
        following structure::
            {'label 1': {'precision':0.5,
                         'recall':1.0,
                         'f1-score':0.67,
                         'support':1},
             'label 2': { ... },
              ...
            }
        The reported averages include macro average (averaging the unweighted
        mean per label), weighted average (averaging the support-weighted mean
        per label), and sample average (only for multilabel classification).
        Micro average (averaging the total true positives, false negatives and
        false positives) is only shown for multi-label or multi-class
        with a subset of classes, because it corresponds to accuracy
        otherwise and would be the same for all metrics.
        See also :func:`precision_recall_fscore_support` for more details
        on averages.
    Note that in binary classification, recall of the positive class
    is also known as "sensitivity"; recall of the negative class is
    "specificity".
    See Also
    --------
    precision_recall_fscore_support: Compute precision, recall, F-measure and
        support for each class.
    confusion_matrix: Compute confusion matrix to evaluate the accuracy of a
        classification.
    multilabel_confusion_matrix: Compute a confusion matrix for each class or sample.
    Examples
    --------
    >>> from sklearn.metrics import classification_report
    >>> y_true = [0, 1, 2, 2, 2]
    >>> y_pred = [0, 0, 2, 2, 1]
    >>> target_names = ['class 0', 'class 1', 'class 2']
    >>> print(classification_report(y_true, y_pred, target_names=target_names))
                  precision    recall  f1-score   support
    <BLANKLINE>
         class 0       0.50      1.00      0.67         1
         class 1       0.00      0.00      0.00         1
         class 2       1.00      0.67      0.80         3
    <BLANKLINE>
        accuracy                           0.60         5
       macro avg       0.50      0.56      0.49         5
    weighted avg       0.70      0.60      0.61         5
    <BLANKLINE>
    >>> y_pred = [1, 1, 0]
    >>> y_true = [1, 1, 1]
    >>> print(classification_report(y_true, y_pred, labels=[1, 2, 3]))
                  precision    recall  f1-score   support
    <BLANKLINE>
               1       1.00      0.67      0.80         3
               2       0.00      0.00      0.00         0
               3       0.00      0.00      0.00         0
    <BLANKLINE>
       micro avg       1.00      0.67      0.80         3
       macro avg       0.33      0.22      0.27         3
    weighted avg       1.00      0.67      0.80         3
    <BLANKLINE>
    """
    # NOTE(review): upstream sklearn target validation (_check_targets) is
    # disabled in this vendored copy.
    #y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    # Infer the label set from the data when not given explicitly.
    if labels is None:
        labels = unique_labels(y_true, y_pred)
        labels_given = False
    else:
        labels = np.asarray(labels)
        labels_given = True
    # NOTE(review): y_type is inferred purely from the number of labels here
    # (2 labels => 'binary'); the original sklearn code inspects the targets
    # themselves, so this is a simplification -- multilabel is never detected.
    if len(labels) == 2:
        y_type = 'binary'
    else:
        y_type = 'multiclass'
    # labelled micro average
    micro_is_accuracy = (y_type == "multiclass" or y_type == "binary") and (
        not labels_given or (set(labels) == set(unique_labels(y_true, y_pred)))
    )
    # Reconcile user-supplied display names with the label set: mismatch is
    # only a warning when labels were given explicitly, an error otherwise.
    if target_names is not None and len(labels) != len(target_names):
        if labels_given:
            warnings.warn(
                "labels size, {0}, does not match size of target_names, {1}".format(
                    len(labels), len(target_names)
                )
            )
        else:
            raise ValueError(
                "Number of classes, {0}, does not match size of "
                "target_names, {1}. Try specifying the labels "
                "parameter".format(len(labels), len(target_names))
            )
    if target_names is None:
        target_names = ["%s" % l for l in labels]
    headers = ["precision", "recall", "f1-score", "support"]
    # compute per-class results without averaging
    p, r, f1, s = precision_recall_fscore_support(
        y_true,
        y_pred,
        labels=labels,
        average=None,
        sample_weight=sample_weight,
        zero_division=zero_division,
    )
    # One (name, precision, recall, f1, support) row per class.
    rows = zip(target_names, p, r, f1, s)
    if y_type.startswith("multilabel"):
        average_options = ("micro", "macro", "weighted", "samples")
    else:
        average_options = ("micro", "macro", "weighted")
    if output_dict:
        report_dict = {label[0]: label[1:] for label in rows}
        for label, scores in report_dict.items():
            report_dict[label] = dict(zip(headers, [i.item() for i in scores]))
    else:
        # Fixed-width text table: the first column is sized to the longest
        # class name (at least wide enough for "weighted avg").
        longest_last_line_heading = "weighted avg"
        name_width = max(len(cn) for cn in target_names)
        width = max(name_width, len(longest_last_line_heading), digits)
        head_fmt = "{:>{width}s} " + " {:>9}" * len(headers)
        report = head_fmt.format("", *headers, width=width)
        report += "\n\n"
        row_fmt = "{:>{width}s} " + " {:>9.{digits}f}" * 3 + " {:>9}\n"
        for row in rows:
            report += row_fmt.format(*row, width=width, digits=digits)
        report += "\n"
    # compute all applicable averages
    for average in average_options:
        if average.startswith("micro") and micro_is_accuracy:
            line_heading = "accuracy"
        else:
            line_heading = average + " avg"
        # compute averages with specified averaging method
        avg_p, avg_r, avg_f1, _ = precision_recall_fscore_support(
            y_true,
            y_pred,
            labels=labels,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        avg = [avg_p, avg_r, avg_f1, np.sum(s)]
        if output_dict:
            report_dict[line_heading] = dict(zip(headers, [i.item() for i in avg]))
        else:
            # The accuracy line shows a single number, so the precision and
            # recall columns are left blank for it.
            if line_heading == "accuracy":
                row_fmt_accuracy = (
                    "{:>{width}s} "
                    + " {:>9.{digits}}" * 2
                    + " {:>9.{digits}f}"
                    + " {:>9}\n"
                )
                report += row_fmt_accuracy.format(
                    line_heading, "", "", *avg[2:], width=width, digits=digits
                )
            else:
                report += row_fmt.format(line_heading, *avg, width=width, digits=digits)
    if output_dict:
        # Collapse the accuracy entry to a scalar, matching sklearn's output.
        if "accuracy" in report_dict.keys():
            report_dict["accuracy"] = report_dict["accuracy"]["precision"]
        return report_dict
    else:
        return report
Note that in binary classification, recall of the positive class is also known as "sensitivity"; recall of the negative class is "specificity". See Also -------- precision_recall_fscore_support: Compute precision, recall, F-measure and support for each class. confusion_matrix: Compute confusion matrix to evaluate the accuracy of a classification. multilabel_confusion_matrix: Compute a confusion matrix for each class or sample. Examples -------- >>> from sklearn.metrics import classification_report >>> y_true = [0, 1, 2, 2, 2] >>> y_pred = [0, 0, 2, 2, 1] >>> target_names = ['class 0', 'class 1', 'class 2'] >>> print(classification_report(y_true, y_pred, target_names=target_names)) precision recall f1-score support <BLANKLINE> class 0 0.50 1.00 0.67 1 class 1 0.00 0.00 0.00 1 class 2 1.00 0.67 0.80 3 <BLANKLINE> accuracy 0.60 5 macro avg 0.50 0.56 0.49 5 weighted avg 0.70 0.60 0.61 5 <BLANKLINE> >>> y_pred = [1, 1, 0] >>> y_true = [1, 1, 1] >>> print(classification_report(y_true, y_pred, labels=[1, 2, 3])) precision recall f1-score support <BLANKLINE> 1 1.00 0.67 0.80 3 2 0.00 0.00 0.00 0 3 0.00 0.00 0.00 0 <BLANKLINE> micro avg 1.00 0.67 0.80 3 macro avg 0.33 0.22 0.27 3 weighted avg 1.00 0.67 0.80 3 <BLANKLINE> |
6,490 | import os
import random
import json
import glob
import cv2
import shutil
import zipfile
from fastdup.image import get_shape
# NOTE(review): the six helper signatures below have had their bodies elided
# in this extract -- as written (a `def` line with no indented body) this
# section is not valid Python on its own. The full implementations
# presumably live in the original fastdup source; verify before reuse.
def create_annotations_file(files, labels, save_path):
def create_tasks_file(files, labels, save_path):
def create_cvat_index(index, save_path):
def create_cvat_manifest(files, save_path):
def init_cvat_dir(save_path):
def copy_images_and_zip(files, save_path):
def do_export_to_cvat(files, labels, save_path):
    """Export *files* (with optional *labels*) as a CVAT-importable bundle.

    Prepares the output directory, writes the annotation/task files, builds
    the manifest and its index, then copies the images in and zips the
    result under *save_path*. Returns 0 on completion.
    """
    init_cvat_dir(save_path)
    create_annotations_file(files, labels, save_path)
    create_tasks_file(files, labels, save_path)
    # The manifest builder returns the index structure consumed next.
    index = create_cvat_manifest(files, save_path)
    create_cvat_index(index, save_path)
    copy_images_and_zip(files, save_path)
    return 0
6,491 | import os
import shutil
import numpy as np
import pandas as pd
from PIL import Image
import tempfile
from pathlib import Path
from fastdup.fastdup_controller import FastdupController
from webcolors import rgb_to_name
import fastdup.definitions as FD
def gen_data(output_dir, n_valid, n_corrupted, n_duplicated, n_no_annotation, n_no_image):
    """Generate a synthetic image dataset of solid-color images in *output_dir*.

    Draws a pool of distinct random RGB colors and partitions it into five
    categories: valid images, corrupted images, duplicated images (two copies
    each), images present on disk but absent from the annotations, and
    annotation rows whose image file is never written.

    Returns five lists of per-image record dicts (valid, corrupted,
    duplicated, no_annotation, no_image), as produced by ``get_df_dict``.

    NOTE(review): relies on helpers defined elsewhere in this module --
    ``get_df_dict`` (builds the annotation record), ``save_color_im``
    (writes the image) and ``create_corrupted_image``; confirm their
    contracts against the original file.
    """
    #shutil.rmtree(output_dir, ignore_errors=True)
    os.makedirs(output_dir, exist_ok=True)
    colors = []
    color_idx = 0
    total_samples = n_valid + n_corrupted + n_duplicated + n_no_annotation + n_no_image
    valid_colors, corrupted_colors, duplicated_colors, no_annotation_colors, no_image_colors = [], [], [], [], []
    # Rejection-sample until we have `total_samples` distinct colors, so every
    # generated image is unique (except the deliberate duplicates below).
    while len(colors) < total_samples:
        color = tuple(np.random.choice(range(256), size=3))
        if color not in colors:
            colors.append(color)
    # create valid images
    for color in colors[color_idx:n_valid]:
        valid_colors.append(get_df_dict(color))
        save_color_im(output_dir, color)
    # color_idx walks through the color pool; each category consumes its slice.
    color_idx += n_valid
    # create images with no annotation
    for color in colors[color_idx:color_idx + n_no_annotation]:
        no_annotation_colors.append(get_df_dict(color, f'_not_in_annot'))
        save_color_im(output_dir, color, f'_not_in_annot')
    color_idx += n_no_annotation
    # create corrupted images
    for color in colors[color_idx:color_idx + n_corrupted]:
        corrupted_colors.append(get_df_dict(color, f'_corrupted'))
        create_corrupted_image(output_dir, color)
    color_idx += n_corrupted
    # create duplicated images
    # Each duplicated color is written twice (suffixes _0_ and _1_), so this
    # category yields 2 * n_duplicated records from n_duplicated colors.
    for color in colors[color_idx:color_idx + n_duplicated]:
        for i in range(2):
            duplicated_colors.append(get_df_dict(color, f'_{i}_duplicated'))
            save_color_im(output_dir, color, f'_{i}_duplicated')
    color_idx += n_duplicated
    # create images with no image
    # Records are created but no file is written -- simulates missing images.
    for color in colors[color_idx:color_idx + n_no_image]:
        no_image_colors.append(get_df_dict(color, f'_no_image'))
    color_idx += n_no_image
    return valid_colors, corrupted_colors, duplicated_colors, no_annotation_colors, no_image_colors
def create_synthetic_data(target_dir, n_valid=100, n_corrupted=21, n_duplicated=22, n_no_annotation=23, n_no_image=24):
    """Build a synthetic dataset on disk and return its annotation DataFrames.

    Delegates image generation to ``gen_data`` and wraps each record category
    in a DataFrame. Returns a 6-tuple: (all-annotated, valid, corrupted,
    not-in-annotation, duplicated, missing-image). Note the master annotation
    table deliberately excludes the "not in annotation" records while
    including rows whose image file was never written.
    """
    valid, corrupted, duplicated, no_annot, no_image = gen_data(
        target_dir, n_valid, n_corrupted, n_duplicated, n_no_annotation, n_no_image)
    df_annot = pd.DataFrame(valid + corrupted + duplicated + no_image)
    return (df_annot,
            pd.DataFrame(valid),
            pd.DataFrame(corrupted),
            pd.DataFrame(no_annot),
            pd.DataFrame(duplicated),
            pd.DataFrame(no_image))
6,492 | import os
import cv2
import numpy as np
from fastdup.image import get_shape
from fastdup.sentry import fastdup_capture_exception
def image_to_label_img_xml(img_path, cur_label, save_dir=None):
    """Write a Pascal-VOC style XML annotation covering the whole image.

    Produces a single bounding box spanning the full image, labeled
    *cur_label*, and writes it as ``<image name>.xml`` next to the image
    (or into *save_dir* when given). Returns the XML string.
    """
    assert os.path.exists(img_path), '{} does not exist'.format(img_path)
    if save_dir:
        assert os.path.exists(save_dir), '{} does not exist'.format(save_dir)
    img = cv2.imread(img_path)
    assert img is not None, f"Failed to read image {img_path}"
    # get_shape is a project helper; h/w/c are the image dimensions.
    h, w, c = get_shape(img)
    xml = f'<annotation>\n'
    xml += f'  <folder>{cur_label}</folder>\n'
    xml += f'  <filename>{os.path.basename(img_path)}</filename>\n'
    xml += f'  <path>{img_path}</path>\n'
    xml += f'  <source>\n'
    xml += f'    <database>Unknown</database>\n'
    xml += f'  </source>\n'
    xml += f'  <size>\n'
    xml += f'    <width>{w}</width>\n'
    xml += f'    <height>{h}</height>\n'
    xml += f'    <depth>{c}</depth>\n'
    xml += f'  </size>\n'
    xml += f'  <segmented>0</segmented>\n'
    xml += f'  <object>\n'
    xml += f'    <name>{cur_label}</name>\n'
    xml += f'    <pose>Unspecified</pose>\n'
    xml += f'    <truncated>0</truncated>\n'
    xml += f'    <difficult>0</difficult>\n'
    xml += f'    <bndbox>\n'
    # The single bounding box is the whole image: (0, 0) .. (w, h).
    xml += f'      <xmin>0</xmin>\n'
    xml += f'      <ymin>0</ymin>\n'
    xml += f'      <xmax>{w}</xmax>\n'
    xml += f'      <ymax>{h}</ymax>\n'
    xml += f'    </bndbox>\n'
    xml += f'  </object>\n'
    xml += f'</annotation>\n'
    # Replace the image extension with .xml (extensions are validated first).
    ext = os.path.basename(img_path).split('.')[-1]
    assert ext.lower() in ['jpg', 'jpeg','png','gif','tif','bmp']
    local_xml = img_path[0:-len(ext)] + 'xml'
    if save_dir is not None:
        local_xml = os.path.join(save_dir, os.path.basename(local_xml))
    with open(local_xml, 'w') as f:
        f.write(xml)
    assert os.path.exists(local_xml)
    return xml
def export_label_classes(labels, save_path):
    """Write one class name per line to ``<save_path>/classes.txt`` (labelImg format)."""
    classes_file = os.path.join(save_path, 'classes.txt')
    with open(classes_file, 'w') as fh:
        fh.writelines(label + '\n' for label in labels)
# Import-time environment setup (duplicated dataset snippet): headless Qt and
# native-library location. LOCAL_DIR / so_file / model_path_full come from
# earlier in the file.
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
# Abort the import when the native shared object is missing.
if not os.path.exists(so_file):
    print("Failed to find shared object", so_file);
    print("Current init file is on", __file__);
    sys.exit(1)
# Abort (and report to Sentry) when the bundled ORT model file is missing.
if not os.path.exists(model_path_full):
    fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
    print("Failed to find ort model on init " + __file__)
    sys.exit(1)
def fastdup_capture_exception(section, e, warn_only=False, extra=""):
    """Send exception *e* to Sentry with diagnostic tags.

    Prints the traceback locally unless ``warn_only`` is True; does nothing
    beyond that when the SENTRY_OPT_OUT environment variable is set.
    """
    if not warn_only:
        traceback.print_exc()
    if 'SENTRY_OPT_OUT' in os.environ:
        # User opted out of telemetry -- do not send anything.
        return
    with sentry_sdk.push_scope() as scope:
        scope.set_tag("section", section)
        scope.set_tag("unit_test", unit_test)
        scope.set_tag("token", token)
        scope.set_tag("platform", platform.platform())
        scope.set_tag("platform.version", platform.version())
        scope.set_tag("python", sys.version.strip().replace("\n", " "))
        scope.set_tag("production", "FASTDUP_PRODUCTION" in os.environ)
        if extra != "":
            scope.set_tag("extra", extra)
        capture_exception(e, scope=scope)
def do_export_to_labelimg(files, labels, save_path):
    """Export images as labelImg-compatible Pascal-VOC XML annotations.

    For every image in *files*, an XML file with one full-image bounding box
    (labeled with the matching entry of *labels*) is written into
    *save_path*. When labels are supplied, a ``classes.txt`` listing the
    unique labels is written as well.

    Parameters
    ----------
    files : list of str
        Paths of the images to annotate.
    labels : list of str or None
        Per-image class names (same length as *files*), or None to export
        annotations without labels.
    save_path : str
        Output directory; created if missing.

    Returns
    -------
    int
        0 on completion. Per-file failures are reported and skipped, not
        raised.
    """
    os.makedirs(save_path, exist_ok=True)
    assert os.path.exists(save_path)
    if labels is None:
        # Bug fix: the classes.txt export previously ran unconditionally, and
        # np.unique(None) yields [None], which crashed export_label_classes
        # (None + '\n'). Skip it entirely when no labels are given.
        pairs = ((f, None) for f in files)
    else:
        assert len(labels) == len(files), "Number of labels and files should be the same"
        export_label_classes(list(np.unique(labels)), save_path)
        pairs = zip(files, labels)
    count = 0
    for f, l in pairs:
        try:
            image_to_label_img_xml(f, l, save_path)
            count += 1
        except Exception as ex:
            fastdup_capture_exception("do_export_to_labelimg", ex)
            print('Failed to retag file', f, ' with exception', ex)
    print('Successfully exported to labeliImg', count, 'files')
    return 0
6,493 | import random
ascii_arts = [
"""
:++. ...
;&&$+. ;x: ;;;
.$&+ X&x ;;;
X$&&&$$; :$&&$+. ;$x .x&&&X: ;$&&&$$$: :;;;;:. ;;; :;;. .;;. :;; :;;;;;.
+x&&Xxx: :$&&Xx++X&&$&$ .&&$+;+$&&; :x&&$xxx: .;;;;;:;;;;;;; :;;. .;;: :;;;;;;:;;;;;:
.$&; :$&X +&&$ ;&$. ;XX X&x .;;; ;;;; :;;. .;;: :;;;. :;;:
.$&; +&&. x&$ .$&&$+. X&x ;;; ;;; :;;. .;;: :;;. ;;;
.$&; +&&. x&$ ;X&&&&$. X&x ;;; ;;; :;;. .;;: :;; ;;;
.$&; :&&; :&&$ .:. x&& X&x :;;; :;;; .;;: :;;: :;;; .;;;
.$&; +&&$; :X&&&$ ;&&+. .X&& X&&. :;;;: :;;;;; ;;;;. .;;;;: :;;;;:. .;;;;
.$&; +&&&&&&X.+&$ .X&&&&&&+ X&&&&; :;;;;;;;.;;; .;;;;;;;:;;: :;; :;;;;;;;.
:;;
:;;
:;;
.::
""",
"""
ad88 88
d8" ,d 88
88 88 88
MM88MMM ,adPPYYba, ,adPPYba, MM88MMM ,adPPYb,88 88 88 8b,dPPYba,
88 "" `Y8 I8[ "" 88 a8" `Y88 88 88 88P' "8a
88 ,adPPPPP88 `"Y8ba, 88 8b 88 88 88 88 d8
88 88, ,88 aa ]8I 88, "8a, ,d88 "8a, ,a88 88b, ,a8"
88 `"8bbdP"Y8 `"YbbdP"' "Y888 `"8bbdP"Y8 `"YbbdP'Y8 88`YbbdP"'
88
88
"""
]
def get_ascii_art():
    """Return one of the banner strings from ``ascii_arts``, chosen uniformly at random."""
    # random.choice is the idiomatic (and equivalent) form of
    # ascii_arts[random.randint(0, len(ascii_arts) - 1)].
    return random.choice(ascii_arts)
6,494 | import sentry_sdk
from sentry_sdk import capture_exception
import time
import os
import sys
import traceback
import platform
import uuid
import hashlib
from fastdup.definitions import VERSION__
from functools import wraps
def traces_sampler(sampling_context):
    """Sentry traces sampler: always sample, logging the context.

    Examines nothing -- it prints the provided sampling context (including
    any parent decision) for inspection and returns a rate of 1, i.e. 100%
    of transactions are sampled.
    """
    # Debug aid: surface the context Sentry passes to the sampler.
    print(sampling_context)
    return 1
6,495 | import sentry_sdk
from sentry_sdk import capture_exception
import time
import os
import sys
import traceback
import platform
import uuid
import hashlib
from fastdup.definitions import VERSION__
from functools import wraps
# Anonymised installation identifier: SHA-256 of the machine MAC address
# (uuid.getnode), used to group telemetry without storing the raw address.
token = hashlib.sha256(str(uuid.getnode()).encode()).hexdigest()
# Set by init_sentry(): True when the UNIT_TEST env var is present; None before init.
unit_test = None
def find_certifi_path():
    # NOTE(review): despite the name, this body is loader/bootstrap code that
    # locates and loads the fastdup native shared library. It references
    # LOCAL_DIR, WinDLL/CDLL, GITHUB_URL, CONTACT_EMAIL and model_path_full,
    # which are defined elsewhere in the package -- confirm on merge.
    import os
    # force Qt to render off-screen (works on headless servers / CI)
    os.environ["QT_QPA_PLATFORM"] ="offscreen"
    import platform
    os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
    if platform.system() == "Windows":
        is_windows = True
        import struct
        # 32-bit python cannot load the 64-bit native library
        assert struct.calcsize("P") * 8 == 64, "Detected 32 bit windows, not supported, please run with 64 bits windows"
        SO_SUFFIX=".dll"
        so_file = os.path.join(LOCAL_DIR, 'fastdup_shared' + SO_SUFFIX)
        # https://docs.sentry.io/platforms/native/configuration/backends/crashpad/
        if os.path.exists(os.path.join(LOCAL_DIR, 'crashpad_handler.exe')):
            os.environ['SENTRY_CRASHPAD'] = os.path.join(LOCAL_DIR, 'crashpad_handler.exe')
    elif platform.system() == "Darwin":
        SO_SUFFIX=".dylib"
        # https://docs.sentry.io/platforms/native/configuration/backends/crashpad/
        if os.path.exists(os.path.join(LOCAL_DIR, 'lib/crashpad_handler')):
            os.environ['SENTRY_CRASHPAD'] = os.path.join(LOCAL_DIR, 'lib/crashpad_handler')
        else:
            print('Failed to find crashpad handler on ', os.path.join(LOCAL_DIR, 'lib/crashpad_handler'))
        so_file = os.path.join(LOCAL_DIR, 'libfastdup_shared' + SO_SUFFIX)
    else:
        SO_SUFFIX=".so"
        so_file = os.path.join(LOCAL_DIR, 'libfastdup_shared' + SO_SUFFIX)
    if not os.path.exists(so_file):
        print("Failed to find shared object", so_file);
        print("Current init file is on", __file__);
        sys.exit(1)
    try:
        # this should be supported only from python3.8 and up
        if platform.system() == "Windows":
            # add DLL search paths needed to resolve the library's dependencies
            os.add_dll_directory(LOCAL_DIR)
            #os.add_dll_directory(LOCAL_DIR + "\\lib")
            os.add_dll_directory(os.path.join(os.environ['SystemRoot'], 'System32'))
            #os.add_dll_directory("C:\\Program Files\\PowerShell\\7")
            dll = WinDLL(so_file)
        else:
            dll = CDLL(so_file)
    except Exception as ex:
        fastdup_capture_exception("__init__", ex)
        print("Please reach out to fastdup support, it seems installation is missing critical files to start fastdup.")
        print("We would love to understand what has gone wrong.")
        print("You can open an issue here: " + GITHUB_URL + " or email us at " + CONTACT_EMAIL)
        find_command = "\"find " + LOCAL_DIR + " \""
        if platform.system() == "Windows":
            find_command = "\"tree " + LOCAL_DIR + " \""
        print("Share out output of the command " + find_command)
        sys.exit(1)
    if not os.path.exists(model_path_full):
        fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
        print("Failed to find ort model on init " + __file__)
        sys.exit(1)
# Release version reported to Sentry.
# NOTE(review): shadows VERSION__ imported from fastdup.definitions above -- confirm intended.
VERSION__ = "1.86"
def init_sentry():
    """Initialise the Sentry telemetry client (honouring SENTRY_OPT_OUT) and
    persist the anonymous installation token to ~/.token (best effort)."""
    global unit_test
    if 'SENTRY_OPT_OUT' not in os.environ:
        if platform.system() == 'Darwin':
            # fix CA certficate issue on latest MAC models
            path = find_certifi_path()
            if path is not None:
                # only fill in the cert bundle when the user has not configured one
                if 'SSL_CERT_FILE' not in os.environ:
                    os.environ["SSL_CERT_FILE"] = path
                if 'REQUESTS_CA_BUNDLE' not in os.environ:
                    os.environ["REQUESTS_CA_BUNDLE"] = path
        sentry_sdk.init(
            dsn="https://b526f209751f4bcea856a1d90e7cf891@o4504135122944000.ingest.sentry.io/4504168616427520",
            debug='SENTRY_DEBUG' in os.environ,
            # Set traces_sample_rate to 1.0 to capture 100%
            # of transactions for performance monitoring.
            # We recommend adjusting this value in production.
            traces_sample_rate=1,
            release=VERSION__,
            default_integrations=False
        )
        unit_test = 'UNIT_TEST' in os.environ
        try:
            # remember the anonymised token so later runs report a stable id
            filename = os.path.join(os.environ.get('HOME', '/tmp'),".token")
            if platform.system() == "Windows":
                filename = os.path.join(os.environ.get('USERPROFILE',"c:\\"),".token")
            with open(filename, "w") as f:
                f.write(token)
            #if platform.system() == "Windows":
            #    f.write("\n")
            #    LOCAL_DIR=os.path.dirname(os.path.abspath(__file__))
            #    f.write(LOCAL_DIR)
        except:
            # best effort: failure to persist the token must never break startup
            pass
6,496 | import sentry_sdk
from sentry_sdk import capture_exception
import time
import os
import sys
import traceback
import platform
import uuid
import hashlib
from fastdup.definitions import VERSION__
from functools import wraps
# NOTE(review): duplicated bootstrap fragment -- LOCAL_DIR, so_file and
# model_path_full are defined elsewhere in the package; confirm on merge.
import os
# render Qt off-screen so fastdup works on headless machines
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
# abort early with a clear message when the native shared library is missing
if not os.path.exists(so_file):
    print("Failed to find shared object", so_file);
    print("Current init file is on", __file__);
    sys.exit(1)
# the bundled onnx-runtime model is also mandatory
if not os.path.exists(model_path_full):
    fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
    print("Failed to find ort model on init " + __file__)
    sys.exit(1)
def fastdup_capture_log_debug_state(config):
    """Attach *config* as a Sentry 'snapshot' breadcrumb (skipped when the user opted out)."""
    if 'SENTRY_OPT_OUT' in os.environ:
        return
    #with sentry_sdk.configure_scope() as scope:
    #    scope.clear_breadcrumbs()
    sentry_sdk.add_breadcrumb({
        'type': 'debug',
        'category': 'setup',
        'message': 'snapshot',
        'level': 'info',
        'timestamp': time.time(),
        'data': config,
    })
6,497 | import sentry_sdk
from sentry_sdk import capture_exception
import time
import os
import sys
import traceback
import platform
import uuid
import hashlib
from fastdup.definitions import VERSION__
from functools import wraps
def fastdup_capture_exception(section, e, warn_only=False, extra=""):
    """Report exception *e* to Sentry tagged with *section*.

    Prints the current traceback unless *warn_only*; does nothing further
    when the user opted out via SENTRY_OPT_OUT.
    """
    if not warn_only:
        traceback.print_exc()
    if 'SENTRY_OPT_OUT' in os.environ:
        return
    # environment fingerprint attached to every reported exception
    tags = {
        "section": section,
        "unit_test": unit_test,
        "token": token,
        "platform": platform.platform(),
        "platform.version": platform.version(),
        "python": sys.version.strip().replace("\n", " "),
        "production": "FASTDUP_PRODUCTION" in os.environ,
    }
    if extra != "":
        tags["extra"] = extra
    with sentry_sdk.push_scope() as scope:
        for tag_name, tag_value in tags.items():
            scope.set_tag(tag_name, tag_value)
        capture_exception(e, scope=scope)
def fastdup_performance_capture(section, start_time):
    """Send a 'Performance' message to Sentry with the elapsed time since *start_time*.

    No-op when SENTRY_OPT_OUT is set; always flushes the Sentry queue otherwise.
    """
    if 'SENTRY_OPT_OUT' in os.environ:
        return
    try:
        total_time = time.time()-start_time
        if total_time == 0:
            return
        # avoid reporting unit tests back to sentry
        if token == '41840345eec72833b7b9928a56260d557ba2a1e06f86d61d5dfe755fa05ade85':
            import random
            if random.random() < 0.995:
                return
        sentry_sdk.set_tag("runtime", str(time.time()-start_time))
        with sentry_sdk.push_scope() as scope:
            for tag_name, tag_value in (
                    ("section", section),
                    ("unit_test", unit_test),
                    ("token", token),
                    ("runtime-sec", total_time),
                    ("platform", platform.platform()),
                    ("platform.version", platform.version()),
                    ("python", sys.version.strip().replace("\n", " ")),
                    ("production", "FASTDUP_PRODUCTION" in os.environ)):
                scope.set_tag(tag_name, tag_value)
            sentry_sdk.capture_message("Performance", scope=scope)
    finally:
        # flush even on the early-return paths above (matches the original try/finally)
        sentry_sdk.flush(timeout=5)
def v1_sentry_handler(func):
    """Decorator for V1 API entry points: reports runtime to Sentry on success
    and captures unexpected exceptions before re-raising them.

    The known RuntimeError('Fastdup execution failed') is re-raised without
    being captured, since it is already reported closer to the failure site.
    """
    @wraps(func)
    def inner_function(*args, **kwargs):
        try:
            start_time = time.time()
            ret = func(*args, **kwargs)
            fastdup_performance_capture(f"V1:{func.__name__}", start_time)
            return ret
        except RuntimeError as ex:
            if str(ex) != 'Fastdup execution failed':
                fastdup_capture_exception(f"V1:{func.__name__}", ex)
            # Bug fix: bare `raise` preserves the original traceback;
            # `raise ex` restarted it from this frame.
            raise
        except Exception as ex:
            fastdup_capture_exception(f"V1:{func.__name__}", ex)
            raise
    return inner_function
6,498 | import os
import pandas as pd
import cv2
import time
import numpy as np
import traceback
import shutil
import pathlib
from fastdup.image import plot_bounding_box, my_resize, get_type, imageformatter, create_triplet_img, fastdup_imread, calc_image_path, clean_images, pad_image, enhance_image, fastdup_imwrite
from fastdup.definitions import *
import re
from multiprocessing import Pool
from fastdup.sentry import *
from fastdup.utils import load_filenames, merge_with_filenames, get_bounding_box_func_helper, load_stats, load_labels, sample_from_components, calc_save_dir, convert_v1_to_v02
# tqdm is optional: fall back to a no-op shim with the same call shape
# (iterable plus the total=/desc= keywords used in this file) when missing.
try:
    from tqdm.auto import tqdm
except:
    tqdm = (lambda x, total=None, desc=None: x)
def print_success_msg(report_name, out_file, lazy_load):
    """Print where a generated gallery was stored.

    Parameters:
        report_name (str): human readable report name (e.g. 'duplicates').
        out_file (str): path of the written html file.
        lazy_load (bool): when True the html references images on disk, so the
            user must ship the side folders together with the file.
    """
    print(f"Stored {report_name} visual view in ", out_file)
    if lazy_load:
        # Bug fix: corrected the user-facing typo 'subolders' -> 'subfolders'.
        print("Note: when using lazy_load=True, the images are relative to the location of the html file. When sharing the report please make"
              " sure to include also subfolders images & assets.")
def format_image_html_string(img_paths, lazy_load, max_width, save_path=None):
    """Return one html snippet per image: a lazy <img> tag referencing the file
    on disk (lazy_load) or the inline-encoded image content otherwise."""
    if lazy_load:
        # let the browser defer fetching until the image scrolls into view
        return ["<img src=\"" + shorten_image(p, save_path) + "\" loading=\"lazy\">" for p in img_paths]
    # embed the image bytes directly inside the html document
    return [imageformatter(p, max_width) for p in img_paths]
def swap_dataframe(subdf, cols):
    """Transpose each row of *subdf* (restricted to the textual columns of *cols*)
    into its own single-column DataFrame and return them as a list."""
    def _is_text_col(name):
        # drop image payload columns and the info*/similar helper columns
        lowered = name.lower()
        return lowered != 'image' and lowered != 'similar' and not name.startswith('info')

    kept_cols = [c for c in cols if _is_text_col(c)]
    return [pd.DataFrame(row) for _, row in subdf[kept_cols].iterrows()]
def find_label(get_label_func, df, in_col, out_col, vqa_prompt: str = None, kwargs=None):
    """Fill df[out_col] with labels derived from df[in_col] via *get_label_func*.

    get_label_func may be: a labels csv path, a column name in df, one of the
    supported caption/VQA/age model names, a filename->label dict, or a callable.
    Returns the (mutated) df.
    """
    if (get_label_func is not None):
        if isinstance(get_label_func, str):
            if os.path.exists(get_label_func):
                # a labels file: must align 1:1 with the image list order
                df_labels = load_labels(get_label_func, kwargs)
                assert len(df_labels) == len(df), f"Error: wrong length of labels file {get_label_func} expected {len(df)} got {len(df_labels)}"
                df[out_col] = df_labels['label']
            elif get_label_func in df.columns:
                # NOTE(review): copies df['label'] rather than df[get_label_func] --
                # looks suspicious when the named column is not 'label'; confirm.
                df[out_col] = df['label']
            elif get_label_func in CAPTION_MODEL_NAMES:
                from fastdup.captions import generate_labels
                df[out_col] = generate_labels(df[in_col], get_label_func, device='cpu')
            elif get_label_func == VQA_MODEL1_NAME:
                from fastdup.captions import generate_vqa_labels
                df[out_col] = generate_vqa_labels(df[in_col], vqa_prompt, kwargs)
            elif get_label_func == AGE_LABEL1_NAME:
                from fastdup.captions import generate_age_labels
                df[out_col] = generate_age_labels(df[in_col], kwargs)
            else:
                assert False, f"Found str label {get_label_func} but it is neither a file nor a column name in the dataframe {df.columns}"
        elif isinstance(get_label_func, dict):
            # missing filenames fall back to the MISSING_LABEL sentinel
            df[out_col] = df[in_col].apply(lambda x: get_label_func.get(x, MISSING_LABEL))
        elif callable(get_label_func):
            assert len(df), "Empty dataframe"
            assert in_col in df.columns, f"Missing column {in_col}"
            df[out_col] = df[in_col].apply(lambda x: get_label_func(x))
        else:
            assert False, f"Failed to understand get_label_func type {type(get_label_func)}"
    if kwargs is not None and 'debug_labels' in kwargs:
        print(df.head())
    return df
def slice_df(df, slice, colname, kwargs=None):
    """Filter *df* to rows whose *colname* label matches *slice*.

    slice may be a single label string (exact match falling back to substring)
    or a list of labels; label cells may themselves be strings or lists of
    strings. kwargs flags: split_sentence_to_label_list, debug_labels, grouped.
    Returns the filtered df (asserts it is non-empty for list slices).
    """
    assert len(df), "Df has no rows"
    split_sentence_to_label_list = kwargs is not None and 'split_sentence_to_label_list' in kwargs and kwargs['split_sentence_to_label_list']
    debug_labels = kwargs is not None and 'debug_labels' in kwargs and kwargs['debug_labels']
    grouped = kwargs is not None and 'grouped' in kwargs and kwargs['grouped']
    if slice is not None:
        if isinstance(slice, str):
            # cover the case labels are string or lists of strings
            if split_sentence_to_label_list:
                labels = df[colname].astype(str).apply(lambda x: split_str(x.lower())).values
                if debug_labels:
                    print('Label with split sentence', labels[:10])
            else:
                labels = df[colname].astype(str).values
                if debug_labels:
                    print('label without split sentence', labels[:10])
            is_list = isinstance(labels[0], list)
            if grouped:
                # grouped rows hold label collections: keep rows containing the slice
                df = df[df[colname].apply(lambda x: slice in x)]
                assert len(df), f"Failed to find any labels with value={slice}"
            elif is_list:
                labels = [item for sublist in labels for item in sublist]
                if debug_labels:
                    print('labels after merging sublists', labels[:10])
                # case-insensitive membership inside per-row label lists
                df = df[df[colname].apply(lambda x: slice in [y.lower() for y in x])]
            else:
                # exact match first, then fall back to substring containment
                df2 = df[df[colname] == slice]
                if len(df2) == 0:
                    df2 = df[df[colname].apply(lambda x: slice in str(x))]
                df = df2
        elif isinstance(slice, list):
            if isinstance(df[colname].values[0], list):
                # keep rows whose label list intersects the requested slice list
                df = df[df[colname].apply(lambda x: len(set(x)&set(slice)) > 0)]
            else:
                df = df[df[colname].isin(slice)]
            assert len(df), f"Failed to find any labels with {slice}"
        else:
            assert False, "slice must be a string or a list of strings"
    return df
def slice_two_labels(df, slice):
    """Filter pair rows by label agreement.

    'diff' keeps rows where label != label2, 'same' keeps rows where they match;
    any other slice value (including non-strings) returns df unchanged.
    """
    if isinstance(slice, str):
        if slice == "diff":
            return df[df['label'] != df['label2']]
        if slice == "same":
            return df[df['label'] == df['label2']]
    return df
def extract_filenames(row, work_dir, save_path, kwargs):
    """Resolve the image pair for one similarity row.

    Returns (impath1, impath2, distance, ptype) where ptype is '<label1>_<label2>'
    or '' when both labels are unknown. For hierarchical runs (without
    draw_orig_image) the paths point at pre-rendered component images.
    """
    debug_hierarchical = 'debug_hierarchical' in kwargs and kwargs['debug_hierarchical']
    hierarchical_run = 'hierarchical_run' in kwargs and kwargs['hierarchical_run']
    draw_orig_image = 'draw_orig_image' in kwargs and kwargs['draw_orig_image']
    if hierarchical_run and not draw_orig_image:
        # hierarchical galleries compare pre-rendered component images, not raw files
        assert 'cluster_from' in row, "Failed to find cluster_from in row " + str(row)
        impath1 = save_path + f"/images/component_{row['counter_from']}_{row['cluster_from']}.jpg"
        impath2 = save_path + f"/images/component_{row['counter_to']}_{row['cluster_to']}.jpg"
        if debug_hierarchical:
            print('was in extract_filenames', impath1, impath2)
    else:
        assert not pd.isnull(row['to']) and not pd.isnull(row['from']), f"Found nan in row {row}"
        impath1 = lookup_filename(row['from'], work_dir)
        impath2 = lookup_filename(row['to'], work_dir)
    dist = row['distance']
    # Bug fix: the original used bitwise '~' on booleans (always truthy, since
    # ~True == -2 and ~False == -1) and dropped the 'assert' keyword, turning
    # both existence checks into no-ops. Only non-S3-temp paths must exist locally.
    if not impath1.startswith(S3_TEMP_FOLDER) and not impath1.startswith(S3_TEST_TEMP_FOLDER):
        assert os.path.exists(impath1), "Failed to find image file " + impath1
    if not impath2.startswith(S3_TEMP_FOLDER) and not impath2.startswith(S3_TEST_TEMP_FOLDER):
        assert os.path.exists(impath2), "Failed to find image file " + impath2
    # prefer explicit labels in the row, otherwise derive a type from the path
    if 'label' in row:
        type1 = row['label']
    else:
        type1 = get_type(impath1)
    if 'label2' in row:
        type2 = row['label2']
    else:
        type2 = get_type(impath2)
    if type1 == "unknown" and type2 == "unknown":
        ptype = ""
    else:
        ptype = '{0}_{1}'.format(type1, type2)
    return impath1, impath2, dist, ptype
def prepare_hierarchy(df, work_dir, save_path, debug_hierarchical, kwargs):
    """Prepare a similarity df for a hierarchical gallery: map each cluster id to
    the counter of its pre-rendered component image under save_path/images, then
    sort pairs by descending distance."""
    # from,to,cluster_from,cluster_do,distance
    # /mnt/data/sku110k/val_245.jpg,/mnt/data/sku110k/train_953.jpg,4,0,0.876736
    # /mnt/data/sku110k/train_6339.jpg,/mnt/data/sku110k/train_953.jpg,19,0,0.891410
    # /mnt/data/sku110k/train_6339.jpg,/mnt/data/sku110k/val_245.jpg,19,4,0.947931
    assert(work_dir is not None), "work_dir must be specified when running hierarchical_run"
    assert os.path.exists(os.path.join(save_path, 'images')), "Failed to find images folder in save_path, run fastdup.create_components_gallery(..., lazy_load=True) first"
    draw_orig_image = 'draw_orig_image' in kwargs and kwargs['draw_orig_image']
    comp_images = os.listdir(os.path.join(save_path, 'images'))
    comp_images = [x for x in comp_images if 'component_' in x]
    comp_map = {}
    assert len(comp_images), "Failed to find any component images in save_path, run fastdup.create_components_gallery(..., lazy_load=True) first"
    for i in comp_images:
        # file names look like component_<counter>_<comp_id>.jpg
        counter = int(os.path.basename(i).split('_')[1])
        assert (counter >= 0), "Failed to parse component counter from index " + i
        comp_id = int(os.path.basename(i).split('_')[2].replace('.jpg', ''))
        assert( comp_id >= 0), "Failed to parse component id from index " + i
        comp_map[comp_id] = counter
    if (debug_hierarchical):
        print('comp_map', comp_map)
    assert len(comp_map), "Failed to find any component images in save_path, run fastdup.create_components_gallery(..., lazy_load=True) first"
    comp_map_set = set(comp_map.keys())
    assert 'cluster_from' in df.columns and 'cluster_to' in df.columns, "Failed to find cluster_from and cluster_to columns in similarity file, run fastdup.create_components_gallery(..., lazy_load=True) first"
    # clusters without a rendered component image map to -1
    df['counter_from'] = df['cluster_from'].apply(lambda x: comp_map.get(x,-1))
    df['counter_to'] = df['cluster_to'].apply(lambda x: comp_map.get(x,-1))
    if (debug_hierarchical):
        print('df', df.head())
        print('len df sim orig', len(df))
        print('Going to filter by set', comp_map_set)
    if draw_orig_image:
        # keep only pairs where both clusters have a rendered component image
        df = df[df['cluster_from'].isin(comp_map_set) & df['cluster_to'].isin(comp_map_set)]
        assert len(df), "Failed to find any rows with custers in top component set < " + str(len(comp_images)) + " Try to run create_components_gallery with larger number of components, using num_images=XX"
        if debug_hierarchical:
            print('df after removed set', df.head())
            print('len df sim after removed set', len(df))
    df = df.sort_values('distance', ascending=False)
    if (debug_hierarchical):
        print('sorted df', df.head())
    return df
# NOTE(review): another duplicated bootstrap fragment -- LOCAL_DIR, so_file and
# model_path_full are defined elsewhere in the package; confirm on merge.
import os
# render Qt off-screen so fastdup works without a display
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
# fail fast with actionable messages when install artifacts are missing
if not os.path.exists(so_file):
    print("Failed to find shared object", so_file);
    print("Current init file is on", __file__);
    sys.exit(1)
if not os.path.exists(model_path_full):
    fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
    print("Failed to find ort model on init " + __file__)
    sys.exit(1)
def clean_images(lazy_load, img_paths, section):
    """Delete temporary gallery images, unless lazy_load html still references them on disk."""
    if lazy_load:
        # the lazy-loading html points at these files, so they must stay
        return
    for path in img_paths:
        try:
            if path is None or not os.path.exists(path):
                continue
            os.unlink(path)
        except Exception as ex:
            # best effort: report the failure but keep deleting the rest
            print("Failed to delete image file ", path, ex)
            fastdup_capture_exception(section, ex)
def create_triplet_img(index, row, work_dir, save_path, extract_filenames, get_bounding_box_func=None, input_dir=None, kwargs=None):
    """Render one similarity pair as a side-by-side triplet image
    (image1 | image2 | alpha blend), annotate it, write it to disk and return
    (image_array, written_path)."""
    img1_path, img2_path, distance, ptype = extract_filenames(row, work_dir, save_path, kwargs)
    img1 = fastdup_imread(img1_path, input_dir, kwargs)
    img2 = fastdup_imread(img2_path, input_dir, kwargs)
    assert img1 is not None, f"Failed to read image1 {img1_path} {str(input_dir)}"
    assert img2 is not None, f"Failed to read image2 {img2_path} {str(input_dir)}"
    # bounding boxes are keyed by crop filename when crops are in play
    if 'crop_filename_from' in row and 'crop_filename_to' in row:
        id_from, id_to = row['crop_filename_from'], row['crop_filename_to']
    else:
        id_from, id_to = row['from'], row['to']
    img1 = plot_bounding_box(img1, get_bounding_box_func, id_from)
    img2 = plot_bounding_box(img2, get_bounding_box_func, id_to)
    h1, w1, c1 = get_shape(img1)
    h2, w2, c2 = get_shape(img2)
    assert h1 > 0 and h2 > 0 and w1 > 0 and w2 > 0
    w = 320
    # NOTE(review): int(h1*w/h1) is just w, so both images become w x w squares;
    # if aspect preservation was intended this should likely use w1 (and h2/w2
    # for the second image) -- confirm before changing.
    rimg1 = cv2.resize(img1, (w, int(h1*w/h1)))
    rimg2 = cv2.resize(img2, (w, int(h1*w/h1)))
    assert rimg1.shape[0] > 0 and rimg2.shape[0] > 0
    alpha = 0.5
    if rimg1.shape != rimg2.shape: # combination of grayscale and color
        if len(rimg1.shape) == 2:
            rimg1 = cv2.cvtColor(rimg1, cv2.COLOR_GRAY2RGB)
        elif len(rimg1.shape) ==3 and rimg1.shape[2] == 4:
            rimg1 = cv2.cvtColor(rimg1, cv2.COLOR_RGBA2RGB)
        if len(rimg2.shape) == 2:
            rimg2 = cv2.cvtColor(rimg2, cv2.COLOR_GRAY2RGB)
        # NOTE(review): condition checks rimg1's rank but rimg2's channels -- confirm.
        elif len(rimg1.shape) ==3 and rimg2.shape[2] == 4:
            rimg2 = cv2.cvtColor(rimg2, cv2.COLOR_RGBA2RGB)
    error_weighted = False
    try:
        cimage = cv2.addWeighted(rimg1,alpha,rimg2,1-alpha,0)
    except Exception as ex:
        # blending can fail on residual shape mismatch; fall back to a 2-image strip
        error_weighted = True
        fastdup_capture_exception("create_triplet_image", ex, True, f"Dimes are {rimg1.shape} {rimg2.shape}")
    hierarchical_run = kwargs is not None and 'hierarchical_run' in kwargs and kwargs['hierarchical_run']
    text1 = os.path.splitext(os.path.basename(img1_path))[0]
    text2 = os.path.splitext(os.path.basename(img2_path))[0]
    if hierarchical_run:
        # component_<counter>_<id>.jpg -> show only the component id
        text1 = text1.split('_')[2]
        text2 = text2.split('_')[2]
    (w, h),nimg1 = draw_text(rimg1, text1, font_scale=1, pos=(10, 10))
    (w, h),nimg2 = draw_text(rimg2, text2, font_scale=1, pos=(10, 10))
    if not error_weighted:
        (w, h),cimage = draw_text(cimage, 'blended image', font_scale=1, pos=(10, 10))
    # NOTE(review): when error_weighted is True, cimage was never assigned and this
    # assert raises NameError rather than the intended check -- confirm and fix upstream.
    assert cimage.shape[0] > 0 and cimage.shape[1] > 0
    if hierarchical_run or error_weighted:
        hcon_img = hconcat_resize_min([nimg1, nimg2])
    else:
        hcon_img = hconcat_resize_min([nimg1, nimg2, cimage])
    if ptype != "":
        summary_txt = 'type: {0}, distance: {1:.2f}'.format(ptype, distance)
    else:
        summary_txt = 'distance: {0:.2f}'.format(distance)
    y = int(hcon_img.shape[0]*0.9)
    x = int(hcon_img.shape[1]/3)
    if not hierarchical_run:
        (w, h),hcon_img = draw_text(hcon_img, summary_txt, font_scale=1, pos=(10, y))
    name1 = os.path.splitext(os.path.basename(img1_path))[0]
    name2 = os.path.splitext(os.path.basename(img2_path))[0]
    pid = '{0}_{1}'.format(name1,name2) #+ suffix_from + suffix_to
    lazy_load = 'lazy_load' in kwargs and kwargs['lazy_load']
    if lazy_load:
        # lazy html expects images under save_path/images
        os.makedirs(os.path.join(save_path, 'images'), exist_ok=True)
        hcon_img_path = f'{save_path}/images/{pid}_{index}.jpg'
    else:
        hcon_img_path = f'{save_path}/{pid}_{index}.jpg'
    fastdup_imwrite(hcon_img_path, hcon_img)
    return hcon_img, hcon_img_path
def convert_v1_to_v02(df):
    """Map fastdup v1.0 column names back to the v0.2 scheme ('from'/'to'/'label'/'label2').

    Note: when the v1 'filename_from'/'filename_to' pair is present, the original
    numeric 'from'/'to' columns are dropped in place before the rename.
    """
    # (mapping applied when all its source columns exist, whether to drop the old ids)
    conversions = [
        ({'filename_from': 'from', 'filename_to': 'to'}, True),
        ({'filename_outlier': 'from', 'filename_nearest': 'to'}, False),
        ({'label_from': 'label', 'label_to': 'label2'}, False),
        ({'label_outlier': 'label'}, False),
    ]
    for mapping, drop_ids in conversions:
        if all(col in df.columns for col in mapping):
            if drop_ids:
                del df['from']
                del df['to']
            df = df.rename(columns=mapping)
    return df
def calc_save_dir(save_path):
    """Return the directory gallery artifacts should be written into.

    save_path may be a directory (returned unchanged) or an html file path,
    in which case the containing directory is returned ('.' when the path
    has no directory component).
    """
    if not save_path.endswith(".html"):
        return save_path
    parent = os.path.dirname(save_path)
    return parent if parent else "."
def load_filenames(work_dir, kwargs):
    """Load the fastdup file list (image- or crop-level) from *work_dir*.

    work_dir may also be a direct csv path. kwargs flags 'load_crops'/'draw_bbox'
    select the crop list; 'nrows' (via find_nrows) caps the rows read.
    Returns a pd.DataFrame guaranteed to contain a 'filename' column.
    """
    assert work_dir is not None and isinstance(work_dir, str) and os.path.exists(work_dir), \
        f"Need to specify work_dir to point to the location of fastdup work_dir, got {work_dir}"
    load_crops = 'load_crops' in kwargs and kwargs['load_crops']
    draw_bbox = 'draw_bbox' in kwargs and kwargs['draw_bbox']
    if work_dir.endswith('.csv'):
        # caller handed us the csv path directly
        local_filenames = work_dir
    elif load_crops or draw_bbox:
        local_filenames = os.path.join(work_dir, "atrain_" + FILENAME_CROP_LIST)
    else:
        local_filenames = os.path.join(work_dir, "atrain_" + FILENAME_IMAGE_LIST)
    assert os.path.isfile(local_filenames), "Failed to find fastdup input file " + local_filenames
    nrows = find_nrows(kwargs)
    import pandas as pd
    filenames = pd.read_csv(local_filenames, nrows=nrows)
    assert len(filenames), "Empty dataframe found " + local_filenames
    assert 'filename' in filenames.columns, f"Error: Failed to find filename column in {work_dir}/atrain_{FILENAME_IMAGE_LIST}"
    if load_crops and not draw_bbox:
        # expose the crop paths under the canonical 'filename' column
        assert 'crop_filename' in filenames.columns, f"Failed to load crop filename {local_filenames}"
        filenames["filename"] = filenames["crop_filename"]
    return filenames
def load_stats(stats_file, work_dir, kwargs=None, usecols=None):
    """Load fastdup per-image statistics as a DataFrame with a 'filename' column.

    Parameters:
        stats_file: a pd.DataFrame of stats, a stats csv path, or a fastdup
            work_dir containing the atrain stats csv.
        work_dir: fastdup work dir; required when stats_file is a DataFrame.
        kwargs (dict | None): optional flags ('nrows' caps rows read;
            'external_df' is set here when the caller supplied a DataFrame).
        usecols: optional column subset forwarded to pd.read_csv.

    Returns:
        pd.DataFrame with at least a 'filename' column.
    """
    # Bug fix: the original signature used a mutable default (kwargs={}) and then
    # mutated it (kwargs["external_df"] = True), leaking state across calls that
    # relied on the default. Use None as the sentinel instead.
    if kwargs is None:
        kwargs = {}
    assert stats_file is not None, "None stat file"
    nrows = find_nrows(kwargs)
    stats = stats_file
    import pandas as pd
    if isinstance(stats_file, pd.DataFrame):
        if nrows is not None:
            stats = stats_file.head(nrows)
        assert work_dir is not None, "When calling with stats_file which is a pd.DataFrame need to point work_dir to the fastdup work_dir folder"
        kwargs["external_df"] = True
    elif isinstance(stats_file, str):
        assert stats_file is not None and isinstance(stats_file, str) and os.path.exists(stats_file), \
            "Need to specify work_dir to point to the location of fastdup atrain_stats.csv stats file"
        if stats_file.endswith(".csv") and os.path.isfile(stats_file):
            local_filenames = stats_file
            if work_dir is None:
                work_dir = os.path.dirname(local_filenames)
        elif os.path.isdir(stats_file):
            local_filenames = os.path.join(stats_file, "atrain_" + FILENAME_IMAGE_STATS)
            if work_dir is None:
                work_dir = stats_file
        else:
            assert False, "Failed to find stats file " + stats_file
        assert os.path.exists(local_filenames), f"Failed to read stats file {local_filenames} please make sure fastdup was run and this file was created."
        stats = pd.read_csv(local_filenames, nrows=nrows, usecols=usecols)
        assert len(stats), "Empty dataframe found " + local_filenames
    else:
        assert False, "wrong type " + stats_file
    assert stats is not None, "Failed to find stats file " + str(stats_file) + " " + str(work_dir)
    if 'filename' not in stats.columns and 'from' not in stats.columns and 'to' not in stats.columns:
        # stats carries only numeric indexes: join the filename list back in
        assert 'index' in stats.columns, "Failed to find index columns" + str(stats.columns)
        filenames = load_filenames(work_dir, kwargs)
        if len(filenames) == len(stats):
            assert 'filename' in filenames.columns, "Failed to find filename column in atrain_features.dat.csv file"
            stats['filename'] = filenames['filename']
        else:
            stats = merge_stats_with_filenames(stats, filenames)
    assert stats is not None and len(stats), "Failed to read stats"
    assert 'filename' in stats.columns, f"Error: Failed to find filename column"
    assert stats['filename'].values[0] is not None, "Failed to find stats filenames"
    return stats
def merge_with_filenames(df, filenames):
    """Replace the numeric 'from'/'to' ids of a similarity/outlier df with their
    filenames by joining twice against *filenames* (needs 'index' and 'filename'
    columns). Returns a new df with string 'from'/'to' columns."""
    merged = (df.merge(filenames, left_on='from', right_on='index')
                .merge(filenames, left_on='to', right_on='index'))
    assert merged is not None and len(merged), f"Failed to merge similarity/outliers with atrain_features.dat.csv file, \n{df.head()}, \n{filenames.head()}"
    # drop the numeric ids and both join keys, then promote the filenames
    for leftover in ('from', 'to', 'index_x', 'index_y'):
        del merged[leftover]
    return merged.rename(columns={'filename_x': 'from', 'filename_y': 'to'})
def get_bounding_box_func_helper(get_bounding_box_func):
    """Normalize the accepted bounding-box inputs (None, callable, dict, csv path,
    work_dir containing atrain_crops.csv, or DataFrame) into either the original
    callable/dict or a dict mapping filename -> list of [x1, y1, x2, y2] boxes."""
    if get_bounding_box_func is None:
        return None
    import pandas as pd
    if callable(get_bounding_box_func) or isinstance(get_bounding_box_func, dict):
        # already directly usable
        return get_bounding_box_func
    if isinstance(get_bounding_box_func, pd.DataFrame):
        df = get_bounding_box_func
    elif isinstance(get_bounding_box_func, str):
        if os.path.isfile(get_bounding_box_func):
            df = pd.read_csv(get_bounding_box_func)
        elif os.path.isdir(get_bounding_box_func):
            local_file = os.path.join(get_bounding_box_func, "atrain_crops.csv")
            assert os.path.exists(local_file), "Failed to find bounding box file in " + local_file
            df = pd.read_csv(local_file)
        else:
            assert False, "Failed to find input file/folder " + get_bounding_box_func
    else:
        assert False, "get_bounding_box_func should be a callable function, a dictionary, a file with bounding box info or a dataframe"
    assert len(df), "Empty dataframe with bounding box information"
    for required in ("filename", "row_y", "col_x", "width", "height"):
        assert required in df.columns
    # convert (x, y, w, h) rows into [x1, y1, x2, y2] corner boxes
    df["bbox"] = df.apply(lambda r: [r["col_x"], r["row_y"], r["col_x"] + r["width"], r["row_y"] + r["height"]], axis=1)
    grouped = df.groupby('filename')['bbox'].apply(list).reset_index()
    return grouped.set_index('filename')['bbox'].to_dict()
def write_to_html_file(df, title='', filename='out.html', stats_info = None, subtitle=None, max_width=None,
                       write_row_name=True, jupyter_html=False):
    """Render *df* as a formatted fastdup gallery html page and write it to *filename*.

    NOTE(review): stats_info is currently unused -- the code that consumed it is
    commented out below; confirm whether it should be restored or dropped.
    """
    work_dir = os.path.dirname(filename)
    # css_dir = os.path.join(work_dir, 'css')
    # if not os.path.exists(css_dir):
    #     os.mkdir(css_dir)
    # assert os.path.exists(css_dir)
    # write_css(css_dir, max_width)
    # write_css_map(css_dir)
    # copy_assets(work_dir)
    ''' Write an entire dataframe to an HTML file with nice formatting. '''
    #if stats_info is not None:
    #    result += '<left>' + stats_info + '</left><br>'
    result = write_html_header(title, subtitle, max_width, jupyter_html)
    result += write_component_header()
    # one rendered component section per dataframe row
    for i,row in df.iterrows():
        result += write_component(df.columns, row, i, max_width, write_row_name)
    result += write_components_footer()
    result += write_html_footer()
    # result += df.to_html(classes='wide', escape=False)
    # result += ''' </body>
    # </html> '''
    with open(filename, 'w') as f:
        f.write(result)
    assert os.path.exists(filename), "Failed to write file " + filename
The provided code snippet includes necessary dependencies for implementing the `do_create_duplicates_gallery` function. Write a Python function `def do_create_duplicates_gallery(similarity_file, save_path, num_images=20, descending=True, lazy_load=False, get_label_func=None, slice=None, max_width=None, get_bounding_box_func=None, get_reformat_filename_func=None, get_extra_col_func=None, input_dir=None, work_dir=None, threshold=None, **kwargs)` to solve the following problem:
Function to create and display a gallery of images computed by the similarity metrics Parameters: similarity_file (str): csv file with the computed similarities by the fastdup tool, alternatively it can be a pandas dataframe with the computed similarities. save_path (str): output folder location for the visuals num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory. descending (boolean): If False, print the similarities from the least similar to the most similar. Default is True. lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse curser is above the image (reduced html file size). get_label_func (callable): optional function given an absolute path to an image return the image label. Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels. Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file. slice (str): Optional parameter to select a slice of the outliers file based on a specific label. max_width (int): Optional parameter to set the max width of the gallery. get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image. The input is an absolute path to the image and the output is a list of bounding boxes. Each bounding box should be 4 integers: x1, y1, x2, y2. Example of valid bounding box list: [[0, 0, 100, 100]] Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename. 
Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists get_reformat_filename_func (callable): Optional parameter to allow changing the presented filename into another string. The input is an absolute path to the image and the output is the string to display instead of the filename. get_extra_col_func (callable): Optional parameter to allow adding extra columns to the gallery. input_dir (str): Optional parameter to allow reading images from a different path, or from webdataset tar files which are found on a different path work_dir (str): Optional parameter to specify fastdup work_dir when the similarity file is a pd.DataFrame threshold (float): Optional parameter to allow filtering out images with a similarity score above a certain threshold (allowed values 0 -> 1) save_artifacts (boolean): Optional parameter to allow saving the intermediate artifacts (raw images, csv with results) to the output folder Returns: ret (int): 0 if success, 1 if failed
Here is the function:
def do_create_duplicates_gallery(similarity_file, save_path, num_images=20, descending=True,
lazy_load=False, get_label_func=None, slice=None, max_width=None,
get_bounding_box_func=None, get_reformat_filename_func=None,
get_extra_col_func=None, input_dir=None, work_dir=None, threshold=None, **kwargs):
'''
Function to create and display a gallery of images computed by the similarity metrics
Parameters:
similarity_file (str): csv file with the computed similarities by the fastdup tool, alternatively it can be a pandas dataframe with the computed similarities.
save_path (str): output folder location for the visuals
num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory.
descending (boolean): If False, print the similarities from the least similar to the most similar. Default is True.
lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse curser is above the image (reduced html file size).
get_label_func (callable): optional function given an absolute path to an image return the image label.
Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels.
Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file.
slice (str): Optional parameter to select a slice of the outliers file based on a specific label.
max_width (int): Optional parameter to set the max width of the gallery.
get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image.
The input is an absolute path to the image and the output is a list of bounding boxes.
Each bounding box should be 4 integers: x1, y1, x2, y2. Example of valid bounding box list: [[0, 0, 100, 100]]
Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename.
Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists
get_reformat_filename_func (callable): Optional parameter to allow changing the presented filename into another string.
The input is an absolute path to the image and the output is the string to display instead of the filename.
get_extra_col_func (callable): Optional parameter to allow adding extra columns to the gallery.
input_dir (str): Optional parameter to allow reading images from a different path, or from webdataset tar files which are found on a different path
work_dir (str): Optional parameter to specify fastdup work_dir when the similarity file is a pd.DataFrame
threshold (float): Optional parameter to allow filtering out images with a similarity score above a certain threshold (allowed values 0 -> 1)
save_artifacts (boolean): Optional parameter to allow saving the intermediate artifacts (raw images, csv with results) to the output folder
Returns:
ret (int): 0 if success, 1 if failed
'''
img_paths = []
#v1 = 'id_to_filename_func' in kwargs
kwargs['lazy_load'] = lazy_load
hierarchical_run = kwargs is not None and 'hierarchical_run' in kwargs and kwargs['hierarchical_run']
draw_orig_image = 'draw_orig_image' in kwargs and kwargs['draw_orig_image']
blur_threshold = None
if 'blur_threshold' in kwargs:
blur_threshold = kwargs['blur_threshold']
save_dir = calc_save_dir(save_path)
df = similarity_file
df = convert_v1_to_v02(df)
if df['from'].dtype in [int, np.int64]:
assert df['to'].dtype in [int, np.int64], "Wrong types, expect both str or both int"
filenames = load_filenames(work_dir, kwargs)
filenames = filenames[["index","filename"]]
df = merge_with_filenames(df, filenames)
get_bounding_box_func = get_bounding_box_func_helper(get_bounding_box_func)
if blur_threshold is not None:
stats = load_stats(work_dir, None, kwargs)
df['blur'] = stats['blur']
orig_len = len(df)
df = df[df['blur'] > blur_threshold]
print(f"Filtered {orig_len-len(df)} blurry images, remained with {len(df)}")
assert len(df), f"Failed to find images above blur threshold {blur_threshold}"
if threshold is not None:
df = df[df['distance'] >= threshold]
assert len(df), f"Failed to find any duplicates images with similarity score >= {threshold}"
if 'external_df' not in kwargs: # external_df contains sorting by the user
df = df.sort_values('distance', ascending=not descending)
if 'crop_filename_from' not in df.columns:
df = df.drop_duplicates(subset=['from', 'to'])
if get_label_func is not None and slice is not None:
df = find_label(get_label_func, df, 'from', 'label', kwargs)
df = slice_df(df, slice, 'label', kwargs)
if slice in ["diff","same"]:
df = find_label(get_label_func, df, 'to', 'label2', kwargs)
df = slice_two_labels(df, slice)
debug_hierarchical= kwargs is not None and 'debug_hierarchical' in kwargs and kwargs['debug_hierarchical']
if 'hierarchical_run' in kwargs and kwargs['hierarchical_run']:
df = prepare_hierarchy(df, work_dir, save_dir, debug_hierarchical, kwargs)
sets = {}
if 'is_video' in kwargs:
filenames = load_filenames(work_dir, kwargs)
filenames['dirname'] = filenames['filename'].apply(os.path.dirname)
frames = filenames.groupby(['dirname']).size().reset_index(name='num_frames')
df = similarity_file.merge(frames, how='left', left_on=['subfolder1'], right_on=['dirname'])
subdf = df.head(num_images)
# lazy eval of labels as this may be slow
if get_label_func is not None and slice is None and 'label' not in subdf.columns and 'label2' not in subdf.columns:
subdf = find_label(get_label_func, subdf, 'from', 'label', kwargs)
subdf = find_label(get_label_func, subdf, 'to', 'label2', kwargs)
subdf = subdf.reset_index()
if 'is_video' in kwargs:
subdf['ratio'] = subdf['counts'].astype(float) / subdf['num_frames'].astype(float)
subdf['ratio'] = subdf['ratio'].apply(lambda x: round(x,3))
indexes = []
for i, row in tqdm(subdf.iterrows(), total=min(num_images, len(subdf)), desc="Generating gallery"):
if 'crop_filename_from' in row:
im1, im2 = str(row['crop_filename_from']), str(row['crop_filename_to'])
else:
im1, im2 = str(row['from']), str(row['to'])
if im1 + '_' + im2 in sets:
continue
try:
img, imgpath = create_triplet_img(i, row, work_dir, save_dir, extract_filenames, get_bounding_box_func,
input_dir, kwargs)
sets[im1 +'_' + im2] = True
sets[im2 +'_' + im1] = True
indexes.append(i)
img_paths.append(imgpath)
except Exception as ex:
fastdup_capture_exception("triplet image", ex)
print("Failed to generate viz for images", im1, im2, ex)
#img_paths.append(None)
subdf = subdf.iloc[indexes]
import fastdup.html_writer
html_img = format_image_html_string(img_paths, lazy_load, None, save_dir)
subdf.insert(0, 'Image', html_img)
if str(save_path).endswith(".html"):
out_file = save_path
else:
out_file = os.path.join(save_path, FILENAME_DUPLICATES_HTML) if not hierarchical_run else os.path.join(save_path, 'similarity_hierarchical.html')
subdf = subdf.rename(columns={'from':'From', 'to':'To'}, inplace=False)
subdf = subdf.rename(columns={'distance':'Distance'}, inplace=False)
fields = ['Image', 'Distance', 'From', 'To']
if get_label_func is not None or ('label' in subdf.columns and 'label2' in subdf.columns):
subdf = subdf.rename(columns={'label':'From_Label','label2':'To_Label'}, inplace=False)
fields.extend(['From_Label', 'To_Label'])
# for video, show duplicate counts between frames
if 'ratio' in subdf.columns:
fields = ['ratio'] + fields
if callable(get_extra_col_func):
subdf['extra'] = subdf['From'].apply(lambda x: get_extra_col_func(x))
subdf['extra2'] = subdf['To'].apply(lambda x: get_extra_col_func(x))
fields.append('extra')
fields.append('extra2')
if get_reformat_filename_func is not None and callable(get_reformat_filename_func):
subdf['From'] = subdf['From'].apply(lambda x: get_reformat_filename_func(x))
subdf['To'] = subdf['To'].apply(lambda x: get_reformat_filename_func(x))
title = 'Duplicates Report'
subtitle = "Showing duplicate"
if 'is_video' in kwargs:
title = 'Video Duplicates Report'
subtitle += " video pairs"
else:
subtitle += " image pairs"
if hierarchical_run:
title = 'Hierarchical Duplicates Report'
subtitle = "Showing hierarchical images pairs"
if slice is not None:
if slice == "diff":
title += ", of different classes"
else:
title += ", for label " + str(slice)
assert len(subdf), "Error: failed to find any duplicates, try to run() with lower threshold"
if 'get_display_filename_func' in kwargs:
subdf['From'] = subdf['From'].apply(kwargs['get_display_filename_func'])
subdf['To'] = subdf['To'].apply(kwargs['get_display_filename_func'])
#elif 'id_to_filename_func' in kwargs:
# subdf['From'] = subdf['From'].apply(kwargs['id_to_filename_func'])
# subdf['To'] = subdf['To'].apply(kwargs['id_to_filename_func'])
subdf['info'] = swap_dataframe(subdf, fields)
if max_width is None:
max_width = 600
fastdup.html_writer.write_to_html_file(subdf[['Image','info']], title, out_file, None, None, max_width,
jupyter_html=kwargs.get('jupyter_html', False))
assert os.path.exists(out_file), "Failed to generate out file " + out_file
save_artifacts = 'save_artifacts' in kwargs and kwargs['save_artifacts']
if save_artifacts:
save_artifacts_file = os.path.join(save_dir, 'similarity_artifacts.csv')
subdf[list(set(fields)-set(['Image']))].to_csv(save_artifacts_file, index=False)
print("Stored similarity artifacts in ", save_artifacts_file)
print_success_msg('similarity', out_file, lazy_load)
clean_images(lazy_load or save_artifacts, img_paths, "create_duplicates_gallery")
return 0 | Function to create and display a gallery of images computed by the similarity metrics Parameters: similarity_file (str): csv file with the computed similarities by the fastdup tool, alternatively it can be a pandas dataframe with the computed similarities. save_path (str): output folder location for the visuals num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory. descending (boolean): If False, print the similarities from the least similar to the most similar. Default is True. lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse curser is above the image (reduced html file size). get_label_func (callable): optional function given an absolute path to an image return the image label. Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels. Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file. slice (str): Optional parameter to select a slice of the outliers file based on a specific label. max_width (int): Optional parameter to set the max width of the gallery. get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image. The input is an absolute path to the image and the output is a list of bounding boxes. Each bounding box should be 4 integers: x1, y1, x2, y2. Example of valid bounding box list: [[0, 0, 100, 100]] Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename. 
Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists get_reformat_filename_func (callable): Optional parameter to allow changing the presented filename into another string. The input is an absolute path to the image and the output is the string to display instead of the filename. get_extra_col_func (callable): Optional parameter to allow adding extra columns to the gallery. input_dir (str): Optional parameter to allow reading images from a different path, or from webdataset tar files which are found on a different path work_dir (str): Optional parameter to specify fastdup work_dir when the similarity file is a pd.DataFrame threshold (float): Optional parameter to allow filtering out images with a similarity score above a certain threshold (allowed values 0 -> 1) save_artifacts (boolean): Optional parameter to allow saving the intermediate artifacts (raw images, csv with results) to the output folder Returns: ret (int): 0 if success, 1 if failed |
6,499 | import os
import pandas as pd
import cv2
import time
import numpy as np
import traceback
import shutil
import pathlib
from fastdup.image import plot_bounding_box, my_resize, get_type, imageformatter, create_triplet_img, fastdup_imread, calc_image_path, clean_images, pad_image, enhance_image, fastdup_imwrite
from fastdup.definitions import *
import re
from multiprocessing import Pool
from fastdup.sentry import *
from fastdup.utils import load_filenames, merge_with_filenames, get_bounding_box_func_helper, load_stats, load_labels, sample_from_components, calc_save_dir, convert_v1_to_v02
try:
    from tqdm.auto import tqdm
except ImportError:
    # tqdm is optional; fall back to a no-op that accepts the same
    # (iterable, total=, desc=) call shape used throughout this module.
    # Catch only ImportError -- a bare except here would also hide real bugs
    # raised while importing tqdm.
    tqdm = (lambda x, total=None, desc=None: x)
def print_success_msg(report_name, out_file, lazy_load):
    """Print where a generated gallery html was stored.

    Args:
        report_name (str): human readable report name (e.g. 'outliers', 'similarity').
        out_file (str): path of the generated html file.
        lazy_load (bool): when True the html references images on disk, so warn the
            user that the 'images' and 'assets' subfolders must be shared with it.
    """
    print(f"Stored {report_name} visual view in ", out_file)
    if lazy_load:
        # fix: 'subolders' -> 'subfolders' in the user-facing message
        print("Note: when using lazy_load=True, the images are relative to the location of the html file. When sharing the report please make"
              " sure to include also subfolders images & assets.")
def format_image_html_string(img_paths, lazy_load, max_width, save_path=None):
    """Build the per-image HTML snippets shown in the gallery table.

    With lazy_load the html only references the image files on disk (small html,
    images must ship alongside); otherwise each image is inlined via base64.
    """
    if lazy_load:
        return ["<img src=\"" + shorten_image(path, save_path) + "\" loading=\"lazy\">" for path in img_paths]
    return [imageformatter(path, max_width) for path in img_paths]
def swap_dataframe(subdf, cols):
    """Transpose each gallery row into a one-column DataFrame for the 'info' panel.

    Image-like columns ('image'/'similar', case-insensitive, and anything starting
    with 'info') are excluded since they are rendered separately.
    Returns a list with one transposed DataFrame per row of subdf.
    """
    keep = [c for c in cols
            if c.lower() != 'image' and not c.startswith('info') and c.lower() != 'similar']
    return [pd.DataFrame(row) for _, row in subdf[keep].iterrows()]
def find_label(get_label_func, df, in_col, out_col, kwargs=None, vqa_prompt: str = None):
    """Attach labels computed from df[in_col] into df[out_col].

    get_label_func may be:
      * a path to a labels csv (index,label; same length/order as df) -> loaded via load_labels
      * a column name already present in df
      * a caption / VQA / age model name (lazy-imports fastdup.captions)
      * a dict mapping filename -> label (missing keys get MISSING_LABEL)
      * a callable mapping filename -> label

    BUGFIX: every caller in this module passes `kwargs` as the 5th positional
    argument; previously the 5th parameter was `vqa_prompt`, so kwargs was
    silently bound to the wrong slot and ignored. `kwargs` is now the 5th
    parameter and `vqa_prompt` moved after it.

    Args:
        df (pd.DataFrame): dataframe to annotate in place.
        in_col (str): column holding filenames (input to the label function).
        out_col (str): column to write labels into.
        kwargs (dict, optional): supports 'debug_labels' to print the head.
        vqa_prompt (str, optional): prompt forwarded to the VQA model.

    Returns:
        pd.DataFrame: the same dataframe with out_col populated.
    """
    if (get_label_func is not None):
        if isinstance(get_label_func, str):
            if os.path.exists(get_label_func):
                df_labels = load_labels(get_label_func, kwargs)
                assert len(df_labels) == len(df), f"Error: wrong length of labels file {get_label_func} expected {len(df)} got {len(df_labels)}"
                df[out_col] = df_labels['label']
            elif get_label_func in df.columns:
                # NOTE(review): copies df['label'] rather than df[get_label_func];
                # looks intentional only when get_label_func == 'label' -- confirm.
                df[out_col] = df['label']
            elif get_label_func in CAPTION_MODEL_NAMES:
                from fastdup.captions import generate_labels
                df[out_col] = generate_labels(df[in_col], get_label_func, device='cpu')
            elif get_label_func == VQA_MODEL1_NAME:
                from fastdup.captions import generate_vqa_labels
                df[out_col] = generate_vqa_labels(df[in_col], vqa_prompt, kwargs)
            elif get_label_func == AGE_LABEL1_NAME:
                from fastdup.captions import generate_age_labels
                df[out_col] = generate_age_labels(df[in_col], kwargs)
            else:
                assert False, f"Found str label {get_label_func} but it is neither a file nor a column name in the dataframe {df.columns}"
        elif isinstance(get_label_func, dict):
            df[out_col] = df[in_col].apply(lambda x: get_label_func.get(x, MISSING_LABEL))
        elif callable(get_label_func):
            assert len(df), "Empty dataframe"
            assert in_col in df.columns, f"Missing column {in_col}"
            df[out_col] = df[in_col].apply(get_label_func)
        else:
            assert False, f"Failed to understand get_label_func type {type(get_label_func)}"
    if kwargs is not None and 'debug_labels' in kwargs:
        print(df.head())
    return df
def slice_df(df, slice, colname, kwargs=None):
    """Filter df down to the rows whose label column matches the requested slice.

    Args:
        df (pd.DataFrame): dataframe that already contains the label column.
        slice (str | list | None): label value(s) to keep; None returns df unchanged.
            (The special "diff"/"same" values are handled by the callers, not here.)
        colname (str): name of the label column to slice on.
        kwargs (dict, optional): supports 'split_sentence_to_label_list' (split each
            label string into a lowercase word list before matching), 'debug_labels'
            (verbose prints) and 'grouped' (labels are lists; keep rows whose list
            contains the slice).

    Returns:
        pd.DataFrame: the filtered dataframe; asserts non-empty in the grouped
        and list-slice branches.
    """
    assert len(df), "Df has no rows"
    split_sentence_to_label_list = kwargs is not None and 'split_sentence_to_label_list' in kwargs and kwargs['split_sentence_to_label_list']
    debug_labels = kwargs is not None and 'debug_labels' in kwargs and kwargs['debug_labels']
    grouped = kwargs is not None and 'grouped' in kwargs and kwargs['grouped']
    if slice is not None:
        if isinstance(slice, str):
            # cover the case labels are string or lists of strings
            if split_sentence_to_label_list:
                # split_str is defined elsewhere in this module; labels become
                # lowercase word lists here.
                labels = df[colname].astype(str).apply(lambda x: split_str(x.lower())).values
                if debug_labels:
                    print('Label with split sentence', labels[:10])
            else:
                labels = df[colname].astype(str).values
                if debug_labels:
                    print('label without split sentence', labels[:10])
            # `labels` is only used to detect list-valued labels (and for debug prints).
            is_list = isinstance(labels[0], list)
            if grouped:
                df = df[df[colname].apply(lambda x: slice in x)]
                assert len(df), f"Failed to find any labels with value={slice}"
            elif is_list:
                labels = [item for sublist in labels for item in sublist]
                if debug_labels:
                    print('labels after merging sublists', labels[:10])
                # NOTE(review): labels are lowercased for the comparison but `slice`
                # itself is not -- confirm callers pass lowercase slice values.
                df = df[df[colname].apply(lambda x: slice in [y.lower() for y in x])]
            else:
                # Exact match first; fall back to substring match when nothing matched.
                df2 = df[df[colname] == slice]
                if len(df2) == 0:
                    df2 = df[df[colname].apply(lambda x: slice in str(x))]
                df = df2
        elif isinstance(slice, list):
            # A list slice keeps rows matching any of the requested labels.
            if isinstance(df[colname].values[0], list):
                df = df[df[colname].apply(lambda x: len(set(x)&set(slice)) > 0)]
            else:
                df = df[df[colname].isin(slice)]
            assert len(df), f"Failed to find any labels with {slice}"
        else:
            assert False, "slice must be a string or a list of strings"
    return df
def prepare_hierarchy(df, work_dir, save_path, debug_hierarchical, kwargs):
    """Restrict and order a similarity dataframe for the hierarchical gallery view.

    Expects fastdup.create_components_gallery(..., lazy_load=True) to have already
    written 'component_<counter>_<compid>.jpg' files under <save_path>/images; only
    clusters with a rendered component image are kept (when draw_orig_image is set).

    Args:
        df (pd.DataFrame): similarity rows with 'cluster_from'/'cluster_to' columns.
        work_dir (str): fastdup work dir (must not be None).
        save_path (str): gallery output dir containing the 'images' subfolder.
        debug_hierarchical (bool): verbose debug printing.
        kwargs (dict): supports 'draw_orig_image' to enable cluster filtering.

    Returns:
        pd.DataFrame: a SINGLE dataframe sorted by descending distance --
        callers must not tuple-unpack the result.
    """
    # from,to,cluster_from,cluster_do,distance
    # /mnt/data/sku110k/val_245.jpg,/mnt/data/sku110k/train_953.jpg,4,0,0.876736
    # /mnt/data/sku110k/train_6339.jpg,/mnt/data/sku110k/train_953.jpg,19,0,0.891410
    # /mnt/data/sku110k/train_6339.jpg,/mnt/data/sku110k/val_245.jpg,19,4,0.947931
    assert(work_dir is not None), "work_dir must be specified when running hierarchical_run"
    assert os.path.exists(os.path.join(save_path, 'images')), "Failed to find images folder in save_path, run fastdup.create_components_gallery(..., lazy_load=True) first"
    draw_orig_image = 'draw_orig_image' in kwargs and kwargs['draw_orig_image']
    comp_images = os.listdir(os.path.join(save_path, 'images'))
    comp_images = [x for x in comp_images if 'component_' in x]
    comp_map = {}
    assert len(comp_images), "Failed to find any component images in save_path, run fastdup.create_components_gallery(..., lazy_load=True) first"
    # Parse 'component_<counter>_<compid>.jpg' into a compid -> counter map.
    for i in comp_images:
        counter = int(os.path.basename(i).split('_')[1])
        assert (counter >= 0), "Failed to parse component counter from index " + i
        comp_id = int(os.path.basename(i).split('_')[2].replace('.jpg', ''))
        assert( comp_id >= 0), "Failed to parse component id from index " + i
        comp_map[comp_id] = counter
    if (debug_hierarchical):
        print('comp_map', comp_map)
    assert len(comp_map), "Failed to find any component images in save_path, run fastdup.create_components_gallery(..., lazy_load=True) first"
    comp_map_set = set(comp_map.keys())
    assert 'cluster_from' in df.columns and 'cluster_to' in df.columns, "Failed to find cluster_from and cluster_to columns in similarity file, run fastdup.create_components_gallery(..., lazy_load=True) first"
    # Map each cluster id to its rendered-component counter (-1 when not rendered).
    df['counter_from'] = df['cluster_from'].apply(lambda x: comp_map.get(x,-1))
    df['counter_to'] = df['cluster_to'].apply(lambda x: comp_map.get(x,-1))
    if (debug_hierarchical):
        print('df', df.head())
        print('len df sim orig', len(df))
        print('Going to filter by set', comp_map_set)
    if draw_orig_image:
        # Keep only pairs where both clusters have a rendered component image.
        df = df[df['cluster_from'].isin(comp_map_set) & df['cluster_to'].isin(comp_map_set)]
        assert len(df), "Failed to find any rows with custers in top component set < " + str(len(comp_images)) + " Try to run create_components_gallery with larger number of components, using num_images=XX"
        if debug_hierarchical:
            print('df after removed set', df.head())
            print('len df sim after removed set', len(df))
    df = df.sort_values('distance', ascending=False)
    if (debug_hierarchical):
        print('sorted df', df.head())
    return df
def load_one_image_for_outliers(args):
    """Render a single outlier image to disk for the gallery.

    Takes one packed tuple so it can be mapped over a multiprocessing Pool:
    args = (row, work_dir, input_dir, get_bounding_box_func, max_width,
            save_path, kwargs).

    Returns:
        str | None: path of the written image, or None when any step
        (read / bounding-box draw / resize / write) fails; failures are
        reported via fastdup_capture_exception.
    """
    row, work_dir, input_dir, get_bounding_box_func, max_width, save_path, kwargs = args
    impath1, impath2, dist, ptype = extract_filenames(row, work_dir, save_path, kwargs)
    try:
        img = fastdup_imread(impath1, input_dir, kwargs)
        assert img is not None, f"Failed to read image from {impath1} {input_dir}"
        # find the index to retrieve the bounding box.
        if 'crop_filename_outlier' in row:
            outlier_id = row['crop_filename_outlier']
        else:
            outlier_id = row['from']
        img = plot_bounding_box(img, get_bounding_box_func, outlier_id)
        assert img is not None, f"Failed to plot bb from {impath1} {input_dir}"
        img = my_resize(img, max_width=max_width)
        assert img is not None, f"Failed to resize image from {impath1} {input_dir}"
        if 'enhance_image' in kwargs and kwargs['enhance_image']:
            img = enhance_image(img)
        #consider saving second image as well!
        #make sure image file is unique, so add also folder name into the imagefile
        lazy_load = 'lazy_load' in kwargs and kwargs['lazy_load']
        imgpath = calc_image_path(lazy_load, save_path, impath1, "")
        fastdup_imwrite(imgpath, img)
    except Exception as ex:
        fastdup_capture_exception(f"load_one_image_for_outliers", ex)
        imgpath = None
    return imgpath
# NOTE(review): package init-time code. `sys`, `so_file`, `model_path_full` and
# LOCAL_DIR are not defined in this chunk -- they must come from earlier in the
# file; confirm `sys` is imported there.
import os
# Allow cv2/Qt to run without a display server (docker, CI, headless machines).
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
# Fail fast on a broken installation: the native shared object and the ONNX
# model must both exist, otherwise abort the import with a clear message.
if not os.path.exists(so_file):
    print("Failed to find shared object", so_file);
    print("Current init file is on", __file__);
    sys.exit(1)
if not os.path.exists(model_path_full):
    fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
    print("Failed to find ort model on init " + __file__)
    sys.exit(1)
def clean_images(lazy_load, img_paths, section):
    """Delete intermediate gallery images unless they must stay on disk.

    When lazy_load is truthy the html references the files, so nothing is removed.
    Deletion failures are printed and reported, never raised.
    (Note: this shadows the clean_images imported from fastdup.image above.)
    """
    if lazy_load:
        return
    for path in img_paths:
        try:
            if path is not None and os.path.exists(path):
                os.unlink(path)
        except Exception as ex:
            print("Failed to delete image file ", path, ex)
            fastdup_capture_exception(section, ex)
def convert_v1_to_v02(df):
    """Normalize fastdup v1.0 column names to the v0.2 schema used by the galleries.

    v1.0 stores resolved paths in 'filename_from'/'filename_to' (similarity) or
    'filename_outlier'/'filename_nearest' (outliers), and labels in
    'label_from'/'label_to'/'label_outlier'; the gallery code expects
    'from'/'to'/'label'/'label2'.

    BUGFIX: previously `del df['from']` raised KeyError when the input carried
    filename columns but no raw id columns; the drop is now tolerant
    (errors='ignore') and no longer mutates the caller's dataframe in place.

    Args:
        df (pd.DataFrame): similarity or outliers dataframe in either schema.

    Returns:
        pd.DataFrame: dataframe using the v0.2 column names.
    """
    if 'filename_from' in df.columns and 'filename_to' in df.columns:
        # Drop the raw integer ids (if present) so the rename below cannot collide.
        df = df.drop(columns=['from', 'to'], errors='ignore')
        df = df.rename(columns={'filename_from': 'from', 'filename_to': 'to'})
    if 'filename_outlier' in df.columns and 'filename_nearest' in df.columns:
        df = df.rename(columns={'filename_outlier': 'from', 'filename_nearest': 'to'})
    if 'label_from' in df.columns and 'label_to' in df.columns:
        df = df.rename(columns={'label_from': 'label', 'label_to': 'label2'})
    if 'label_outlier' in df.columns:
        df = df.rename(columns={'label_outlier': 'label'})
    return df
def calc_save_dir(save_path):
    """Return the directory where gallery artifacts are written.

    save_path may be either an output folder or a direct .html file path;
    in the latter case artifacts go next to the html file ('.' when the
    path has no directory component).
    """
    if not save_path.endswith(".html"):
        return save_path
    parent = os.path.dirname(save_path)
    return parent if parent else "."
def load_filenames(work_dir, kwargs):
    """Load the fastdup image (or crop) list csv produced by a previous run.

    work_dir may be the fastdup work dir or a direct path to a csv file.
    kwargs flags: 'load_crops' / 'draw_bbox' select the crop list instead of
    the image list; with load_crops alone, 'filename' is replaced by the crop
    path. 'nrows' (via find_nrows) limits how many rows are read.
    """
    assert work_dir is not None and isinstance(work_dir, str) and os.path.exists(work_dir), \
        f"Need to specify work_dir to point to the location of fastdup work_dir, got {work_dir}"
    use_crops = kwargs.get('load_crops', False)
    use_bbox = kwargs.get('draw_bbox', False)
    if work_dir.endswith('.csv'):
        csv_path = work_dir
    elif use_crops or use_bbox:
        csv_path = os.path.join(work_dir, "atrain_" + FILENAME_CROP_LIST)
    else:
        csv_path = os.path.join(work_dir, "atrain_" + FILENAME_IMAGE_LIST)
    assert os.path.isfile(csv_path), "Failed to find fastdup input file " + csv_path
    import pandas as pd
    frame = pd.read_csv(csv_path, nrows=find_nrows(kwargs))
    assert len(frame), "Empty dataframe found " + csv_path
    assert 'filename' in frame.columns, f"Error: Failed to find filename column in {work_dir}/atrain_{FILENAME_IMAGE_LIST}"
    if use_crops and not use_bbox:
        assert 'crop_filename' in frame.columns, f"Failed to load crop filename {csv_path}"
        frame["filename"] = frame["crop_filename"]
    return frame
def merge_with_filenames(df, filenames):
    """Resolve integer 'from'/'to' feature-row ids into image filenames.

    filenames must hold 'index' and 'filename' columns; the ids are replaced
    by the matching filenames and the merge bookkeeping columns are dropped.
    """
    merged = (df.merge(filenames, left_on='from', right_on='index')
                .merge(filenames, left_on='to', right_on='index'))
    assert merged is not None and len(merged), f"Failed to merge similarity/outliers with atrain_features.dat.csv file, \n{df.head()}, \n{filenames.head()}"
    merged = merged.drop(columns=['from', 'to', 'index_x', 'index_y'])
    return merged.rename(columns={'filename_x': 'from', 'filename_y': 'to'})
def get_bounding_box_func_helper(get_bounding_box_func):
    """Normalize the user-provided bounding-box source into a lookup object.

    Accepts None, a callable, a dict (filename -> bbox list), a csv path, a
    work_dir containing atrain_crops.csv, or a DataFrame with
    filename/col_x/row_y/width/height columns. Callables and dicts pass
    through unchanged; tabular inputs are converted to a dict mapping
    filename -> list of [x1, y1, x2, y2] boxes.

    Fixes vs. previous version: the work_dir branch re-read the csv by
    re-joining the path instead of reusing the validated local_file, and the
    DataFrame branch mutated the caller's dataframe (now copied first).
    """
    if get_bounding_box_func is None:
        return None
    import pandas as pd
    if callable(get_bounding_box_func) or isinstance(get_bounding_box_func, dict):
        return get_bounding_box_func
    elif isinstance(get_bounding_box_func, str):
        if os.path.isfile(get_bounding_box_func):
            df = pd.read_csv(get_bounding_box_func)
        elif os.path.isdir(get_bounding_box_func):
            local_file = os.path.join(get_bounding_box_func, "atrain_crops.csv")
            assert os.path.exists(local_file), "Failed to find bounding box file in " + local_file
            df = pd.read_csv(local_file)
        else:
            assert False, "Failed to find input file/folder " + get_bounding_box_func
    elif isinstance(get_bounding_box_func, pd.DataFrame):
        # copy so the bbox column added below does not leak into the caller's frame
        df = get_bounding_box_func.copy()
    else:
        assert False, "get_bounding_box_func should be a callable function, a dictionary, a file with bounding box info or a dataframe"
    assert len(df), "Empty dataframe with bounding box information"
    assert "filename" in df.columns
    assert "row_y" in df.columns
    assert "col_x" in df.columns
    assert "width" in df.columns
    assert "height" in df.columns
    # Convert (x, y, w, h) rows into [x1, y1, x2, y2] boxes grouped per filename.
    df["bbox"] = df.apply(lambda x: [x["col_x"], x["row_y"], x["col_x"] + x["width"], x["row_y"] + x["height"]], axis=1)
    df = df.groupby('filename')['bbox'].apply(list).reset_index()
    my_dict = df.set_index('filename')['bbox'].to_dict()
    return my_dict
def write_to_html_file(df, title='', filename='out.html', stats_info = None, subtitle=None, max_width=None,
                       write_row_name=True, jupyter_html=False):
    """Write an entire dataframe to an HTML file with nice formatting.

    Args:
        df (pd.DataFrame): rows to render; each row becomes one gallery component.
        title (str): page title.
        filename (str): output html path.
        stats_info: currently unused; kept for backward compatibility.
        subtitle (str): optional subtitle shown under the title.
        max_width (int): optional max image width forwarded to the component writer.
        write_row_name (bool): whether to print the row name inside each component.
        jupyter_html (bool): emit html suited for inline jupyter rendering.

    Fixes: the docstring previously sat mid-function (a plain string statement);
    dead commented-out css/to_html code and the unused work_dir local are removed.
    """
    # Build the document as one string: header, one component per row, footers.
    result = write_html_header(title, subtitle, max_width, jupyter_html)
    result += write_component_header()
    for i, row in df.iterrows():
        result += write_component(df.columns, row, i, max_width, write_row_name)
    result += write_components_footer()
    result += write_html_footer()
    with open(filename, 'w') as f:
        f.write(result)
    assert os.path.exists(filename), "Failed to write file " + filename
The provided code snippet includes necessary dependencies for implementing the `do_create_outliers_gallery` function. Write a Python function `def do_create_outliers_gallery(outliers_file, save_path, num_images=20, lazy_load=False, get_label_func=None, how='one', slice=None, descending=True, max_width=None, get_bounding_box_func=None, get_reformat_filename_func=None, get_extra_col_func=None, input_dir= None, work_dir = None, **kwargs)` to solve the following problem:
Function to create and display a gallery of images computed by the outliers metrics Parameters: outliers_file (str): csv file with the computed outliers by the fastdup tool. Altenriously, this can be a pandas dataframe with the computed outliers. save_path (str): output folder location for the visuals num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory. lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse curser is above the image (reduced html file size). get_label_func (callable): optional function given an absolute path to an image return the image label. Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels. Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file. how (str): Optional outlier selection method. one = take the image that is far away from any one image (but may have other images close to it). all = take the image that is far away from all other images. Default is one. slice (str): Optional parameter to select a slice of the outliers file based on a specific label. descending (boolean): Optional parameter to set the order of the components. Default is True namely list components from largest to smallest. max_width (int): Optional parameter to set the max width of the gallery. get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image. The input is an absolute path to the image and the output is a list of bounding boxes. Each bounding box should be 4 integers: x1, y1, x2, y2. 
Example of valid bounding box list: [[0, 0, 100, 100]] Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename. Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists get_reformat_filename_func (callable): Optional parameter to allow changing the presented filename into another string. The input is an absolute path to the image and the output is the string to display instead of the filename. get_extra_col_func (callable): Optional parameter to allow adding extra columns to the gallery. input_dir (str): Optional parameter to specify the input directory of webdataset tar files, in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1' work_dir (str): Optional parameter to specify the working directory in case of giving an hourlier file which is a dataframe. Returns: ret (int): 0 if successful, 1 otherwise
Here is the function:
def do_create_outliers_gallery(outliers_file, save_path, num_images=20, lazy_load=False, get_label_func=None,
how='one', slice=None, descending=True, max_width=None, get_bounding_box_func=None, get_reformat_filename_func=None,
get_extra_col_func=None, input_dir= None, work_dir = None, **kwargs):
'''
Function to create and display a gallery of images computed by the outliers metrics
Parameters:
outliers_file (str): csv file with the computed outliers by the fastdup tool. Altenriously, this can be a pandas dataframe with the computed outliers.
save_path (str): output folder location for the visuals
num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory.
lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse curser is above the image (reduced html file size).
get_label_func (callable): optional function given an absolute path to an image return the image label.
Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels.
Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file.
how (str): Optional outlier selection method. one = take the image that is far away from any one image (but may have other images close to it).
all = take the image that is far away from all other images. Default is one.
slice (str): Optional parameter to select a slice of the outliers file based on a specific label.
descending (boolean): Optional parameter to set the order of the components. Default is True namely list components from largest to smallest.
max_width (int): Optional parameter to set the max width of the gallery.
get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image.
The input is an absolute path to the image and the output is a list of bounding boxes.
Each bounding box should be 4 integers: x1, y1, x2, y2. Example of valid bounding box list: [[0, 0, 100, 100]]
Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename.
Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists
get_reformat_filename_func (callable): Optional parameter to allow changing the presented filename into another string.
The input is an absolute path to the image and the output is the string to display instead of the filename.
get_extra_col_func (callable): Optional parameter to allow adding extra columns to the gallery.
input_dir (str): Optional parameter to specify the input directory of webdataset tar files,
in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1'
work_dir (str): Optional parameter to specify the working directory in case of giving an hourlier file which is a dataframe.
Returns:
ret (int): 0 if successful, 1 otherwise
'''
nrows = None
if 'nrows' in kwargs:
nrows = kwargs['nrows']
hierarchical_run = 'hierarchical_run' in kwargs and kwargs['hierarchical_run']
debug_hierarchical = 'debug_hierarchical' in kwargs and kwargs['debug_hierarchical']
save_artifacts = 'save_artifacts' in kwargs and kwargs['save_artifacts']
save_dir = calc_save_dir(save_path)
img_paths = []
kwargs['lazy_load'] = lazy_load
df = outliers_file
df = convert_v1_to_v02(df)
if df['from'].dtype in [int, np.int64]:
filenames = load_filenames(work_dir, kwargs)
df = merge_with_filenames(df, filenames)
get_bounding_box_func = get_bounding_box_func_helper(get_bounding_box_func)
if (how == 'all'):
if isinstance(outliers_file, pd.DataFrame):
assert work_dir is not None and isinstance(work_dir, str) and os.path.isdir(work_dir), "Failed to find fastdup work_dir folder, please rerun with work_dir pointing to fastdup run"
dups_file = os.path.join(work_dir, FILENAME_SIMILARITY)
assert os.path.exists(dups_file), f'Failed to find input file {dups_file} which is needed for computing how=all similarities, . Please run using fastdup.run(..) to generate this file.'
dups = pd.read_csv(dups_file, nrows=nrows)
assert len(dups), "Error: Failed to locate similarity file file " + dups_file
dups = dups[dups['distance'] >= dups['distance'].astype(float).mean()]
assert len(dups), f"Did not find any images with similarity more than the mean {dups['distance'].mean()}"
if dups['from'].dtype in [int, np.int64]:
filenames = load_filenames(work_dir, kwargs)
dups = merge_with_filenames(dups, filenames)
joined = df.merge(dups, on='from', how='left')
joined = joined[pd.isnull(joined['distance_y'])]
assert len(joined), 'Failed to find outlier images that are not included in the duplicates similarity files, run with how="one".'
df = joined.rename(columns={"distance_x": "distance", "to_x": "to"})
comp_images = []
comp_map = {}
if hierarchical_run:
if debug_hierarchical:
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
df, comp_images, comp_map = prepare_hierarchy(df, work_dir, save_dir, debug_hierarchical, kwargs)
if get_label_func is not None and slice is not None:
df = find_label(get_label_func, df, 'from', 'label', kwargs)
df = slice_df(df, slice, 'label')
assert df is not None, f"Failed to find any labels with {slice} value"
df = df.drop_duplicates(subset='from')
if 'external_df' not in kwargs:
df = df.sort_values(by='distance', ascending=not descending)
df = df.head(num_images)
if get_label_func is not None and slice is None and 'label' not in df.columns:
df = find_label(get_label_func, df, 'from', 'label', kwargs)
all_args = []
for i, row in tqdm(df.iterrows(), total=min(num_images, len(df)), desc="Generating gallery"):
args = row, work_dir, input_dir, get_bounding_box_func, max_width, save_dir, kwargs
all_args.append(args)
# trying to deal with the following runtime error:
#An attempt has been made to start a new process before the
#current process has finished its bootstrapping phase.
parallel_run = 'parallel_run' in kwargs and kwargs['parallel_run']
if parallel_run:
try:
with Pool() as pool:
img_paths = pool.map(load_one_image_for_outliers, all_args)
except RuntimeError as e:
fastdup_capture_exception("create_outliers_gallery_pool", e)
else:
for i in all_args:
img_paths.append(load_one_image_for_outliers(i))
import fastdup.html_writer
img_html = format_image_html_string(img_paths, lazy_load, max_width, save_dir)
df.insert(0, 'Image', img_html)
df = df.rename(columns={'distance':'Distance','from':'Path'}, inplace=False)
out_file = os.path.join(save_path, 'outliers.html') if not str(save_path).endswith(".html") else save_path
title = 'Outliers Report'
subtitle = "Showing image outliers, one per row"
if slice is not None:
title += ", " + str(slice)
cols = ['Image','Distance','Path']
if callable(get_extra_col_func):
df['extra'] = df['Path'].apply(lambda x: get_extra_col_func(x))
cols.append('extra')
# if get_reformat_filename_func is not None and callable(get_reformat_filename_func):
# df['Path'] = df['Path'].apply(lambda x: get_reformat_filename_func(x))
if 'label' in df.columns:
cols.append('label')
reformat_disp_path = kwargs.get('get_display_filename_func', lambda x: x)
df['Path'] = df['Path'].apply(lambda x: reformat_disp_path(x))
df['info'] = swap_dataframe(df, cols)
fastdup.html_writer.write_to_html_file(df[['Image','info']], title, out_file, subtitle=subtitle, jupyter_html=kwargs.get('jupyter_html', False))
if save_artifacts:
df[list(set(cols)-set(['Image']))].to_csv(f'{save_path}/outliers_report.csv')
assert os.path.exists(out_file), "Failed to generate out file " + out_file
if hierarchical_run:
print("Stored outliers hierarchical view in ", os.path.join(out_file))
else:
print_success_msg("outliers", out_file, lazy_load)
clean_images(lazy_load or save_artifacts, img_paths, "create_outliers_gallery")
    return 0 | Function to create and display a gallery of images computed by the outliers metrics Parameters: outliers_file (str): csv file with the computed outliers by the fastdup tool. Alternatively, this can be a pandas dataframe with the computed outliers. save_path (str): output folder location for the visuals num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory. lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse cursor is above the image (reduced html file size). get_label_func (callable): optional function given an absolute path to an image return the image label. Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels. Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file. how (str): Optional outlier selection method. one = take the image that is far away from any one image (but may have other images close to it). all = take the image that is far away from all other images. Default is one. slice (str): Optional parameter to select a slice of the outliers file based on a specific label. descending (boolean): Optional parameter to set the order of the components. Default is True namely list components from largest to smallest. max_width (int): Optional parameter to set the max width of the gallery. get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image. The input is an absolute path to the image and the output is a list of bounding boxes. Each bounding box should be 4 integers: x1, y1, x2, y2. 
Example of valid bounding box list: [[0, 0, 100, 100]] Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename. Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists get_reformat_filename_func (callable): Optional parameter to allow changing the presented filename into another string. The input is an absolute path to the image and the output is the string to display instead of the filename. get_extra_col_func (callable): Optional parameter to allow adding extra columns to the gallery. input_dir (str): Optional parameter to specify the input directory of webdataset tar files, in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1' work_dir (str): Optional parameter to specify the working directory in case of giving an outliers file which is a dataframe. Returns: ret (int): 0 if successful, 1 otherwise |
6,500 | import os
import pandas as pd
import cv2
import time
import numpy as np
import traceback
import shutil
import pathlib
from fastdup.image import plot_bounding_box, my_resize, get_type, imageformatter, create_triplet_img, fastdup_imread, calc_image_path, clean_images, pad_image, enhance_image, fastdup_imwrite
from fastdup.definitions import *
import re
from multiprocessing import Pool
from fastdup.sentry import *
from fastdup.utils import load_filenames, merge_with_filenames, get_bounding_box_func_helper, load_stats, load_labels, sample_from_components, calc_save_dir, convert_v1_to_v02
def print_success_msg(report_name, out_file, lazy_load):
    """Print a confirmation that the gallery report was written.

    Args:
        report_name (str): short name of the report (e.g. "outliers") used in the message.
        out_file (str): path of the generated html file.
        lazy_load (bool): when True, also warn that the html references images by
            relative path, so the report must be shared together with its subfolders.
    """
    print(f"Stored {report_name} visual view in ", out_file)
    if lazy_load:
        # lazy_load reports are not self-contained: images/assets live next to the html
        # fix: "subolders" typo in the user-facing message -> "subfolders"
        print("Note: when using lazy_load=True, the images are relative to the location of the html file. When sharing the report please make"
              " sure to include also subfolders images & assets.")
def format_image_html_string(img_paths, lazy_load, max_width, save_path=None):
    """Build the html <img> snippet for every image path in img_paths.

    When lazy_load is False, images are embedded inline via imageformatter
    (base64); otherwise a lazy-loading <img> tag referencing the shortened
    on-disk path is produced instead.
    """
    if lazy_load:
        tags = []
        for path in img_paths:
            tags.append("<img src=\"" + shorten_image(path, save_path) + "\" loading=\"lazy\">")
        return tags
    return [imageformatter(path, max_width) for path in img_paths]
def visualize_top_components(work_dir, save_path, num_components, get_label_func=None, group_by='visual', slice=None,
                             get_bounding_box_func=None, max_width=None, threshold=None, metric=None, descending=True,
                             max_items = None, min_items=None, keyword=None, comp_type="component",
                             input_dir=None, kwargs=None):
    '''
    Visualize the top connected components
    Args:
        work_dir (str): directory with the output of fastdup run or a dataframe with the content of connected_components.csv
        save_path (str): directory to save the output to
        num_components (int): number of top components to plot
        get_label_func (callable): optional function given an absolute path to an image return the image label.
            Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels.
            Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file.
        group_by (str): 'visual' or 'label'
        slice (str): slice the dataframe based on the label or a list of labels
        get_bounding_box_func (callable): option function to get bounding box for each image given image filename
        max_width (int): optional maximum width of the image
        threshold (float): optional threshold to filter out components with similarity less than this value
        metric (str): optional metric to use for sorting the components
        descending (bool): optional sort in descending order
        max_items (int): optional max number of items to include in the component, namely show only components with less than max_items items
        min_items (int): optional min number of items to include in the component, namely show only components with at least this many items
        keyword (str): optional keyword to filter out components with labels that do not contain this keyword
        comp_type (str): comp type, should be one of component|cluster
        input_dir (str): Optional parameter to specify the input directory of webdataset tar files,
            in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1'
        kwargs (dict): additional options dict (not **kwargs) — read for 'grouped', 'debug_cc',
            'save_artifacts', 'keep_aspect_ratio', 'lazy_load', 'parallel_run', 'is_video' etc.
    Returns:
        ret (pd.DataFrame): with the top components, or None on failure
        img_list (list): of the top components sprite image paths, or None on failure
    '''
    try:
        from fastdup.tensorboard_projector import generate_sprite_image
        import traceback
    except Exception as ex:
        # missing optional dependency: report and bail out with a (None, None) pair
        print(ex)
        fastdup_capture_exception("visualize_top_components", ex)
        return None, None
    assert num_components > 0, "Number of components should be larger than zero"
    # cap on how many images are sampled into one component sprite grid
    MAX_IMAGES_IN_GRID = 54
    #v1 = 'id_to_filename_func' in kwargs
    if isinstance(work_dir, pd.DataFrame):
        # advanced usage: caller passed a pre-computed top-components dataframe
        if 'distance' in work_dir.columns and 'component_id' in work_dir.columns \
            and 'files' in work_dir.columns and 'len' in work_dir.columns and 'files_ids' in work_dir.columns and len(
                work_dir):
            if slice is not None:
                assert 'label' in work_dir.columns, "Failed to find 'label' in dataframe, when using slice string need to provide label column"
            kwargs['grouped'] = True
            top_components = slice_df(work_dir, slice, 'label', kwargs)
        else:
            assert False, f"Got dataframe with the columns: {work_dir.columns} while expecting to get the columns: \
            ['component_id', 'distance', 'files', 'files_ids', 'len'] and optionally label and or crop_filename. \
            component_id is integer index of cluster, files, files_ids, label, crop_filename are lists of files in the component. files include the filenames, files_ids are integer unique indexe for the files\
            label is an optional list of labels per ima, crop_filename are optional list of crops. "
    else:
        # normal usage: compute the top components from the fastdup work_dir artifacts
        top_components = do_find_top_components(work_dir=work_dir, get_label_func=get_label_func, group_by=group_by,
                                                slice=slice, threshold=threshold, metric=metric, descending=descending,
                                                max_items=max_items, min_items=min_items, keyword=keyword, save_path=save_path,
                                                input_dir=input_dir,
                                                comp_type=comp_type, kwargs=kwargs)
        assert top_components is not None, f"Failed to find components with more than {min_items} images. Try to run fastdup.run() with turi_param='ccthreshold=0.9' namely to lower the threshold for grouping components"
        top_components = top_components.head(num_components)
    if 'debug_cc' in kwargs:
        # widen pandas display so the debug dump below is readable
        pd.set_option('display.max_rows', 50)
        pd.set_option('display.max_columns', 500)
        pd.set_option('display.width', 1000)
        print(top_components.head())
    save_artifacts = 'save_artifacts' in kwargs and kwargs['save_artifacts']
    keep_aspect_ratio = 'keep_aspect_ratio' in kwargs and kwargs['keep_aspect_ratio']
    assert top_components is not None and len(top_components), f"Failed to find components with more than {min_items} images. Try to run fastdup.run() with turi_param='ccthreshold=0.9' namely to lower the threshold for grouping components"
    comp_col = "component_id" if comp_type == "component" else "cluster"
    save_dir = calc_save_dir(save_path)
    save_dir = os.path.join(save_dir, "images")
    lazy_load = kwargs.get('lazy_load', False)
    # lazy_load reports reference images from an images/ subfolder next to the html
    subfolder = "" if not lazy_load else "images/"
    os.makedirs(os.path.join(save_dir, subfolder), exist_ok=True)
    # iterate over the top components
    img_paths = []
    counter = 0
    #filname_transform_func = kwargs.get('id_to_filename_func', lambda x: x)
    all_labels = []
    for i,row in tqdm(top_components.iterrows(), total = len(top_components), desc="Generating gallery"):
        try:
            # find the component id
            component_id = row[comp_col]
            # find all the image filenames linked to this `id`
            if save_artifacts:
                if not os.path.exists(os.path.join(save_path, 'images')):
                    os.mkdir(os.path.join(save_path, 'images'))
                pd.DataFrame({'filename':row['files']}).to_csv(os.path.join(save_path, 'images', f'component_{counter}_files.csv'))
            # sample up to MAX_IMAGES_IN_GRID (filename, file_id) pairs from the component
            files_ids = sample_from_components(row, metric, kwargs, MAX_IMAGES_IN_GRID)
            if (len(files_ids) == 0):
                # NOTE(review): this `break` aborts the whole gallery loop; `continue` may be intended — confirm
                print(f"Failed to find any files for component_id {component_id}");
                break
            files, files_ids = zip(*files_ids)
            # if v1 and isinstance(files[0], str):
            #     files = [os.path.join(input_dir, x) for x in files]
            if save_artifacts:
                if not os.path.exists(os.path.join(save_path , "images", f"raw_images_{counter}")):
                    os.mkdir(os.path.join(save_path, "images", f"raw_images_{counter}"))
                for f in files:
                    shutil.copy(f, os.path.join(save_path, "images", f"raw_images_{counter}"))
            tmp_images = []
            w,h = [], []
            val_array = []
            # build the per-image argument tuples consumed by load_one_image
            for f, fid in zip(files, files_ids):
                assert not pd.isnull(f), f"Found None image name on {fid} {input_dir} {row}"
                #if v1:
                #    assert isinstance(fid, (int,np.uint32)), f"found a wrong file_id {fid} {type(fid)}"
                val_array.append([f, fid, input_dir, get_bounding_box_func, kwargs])
            # trying to deal with the following runtime error:
            #An attempt has been made to start a new process before the
            #current process has finished its bootstrapping phase.
            parallel_run = 'parallel_run' in kwargs and kwargs['parallel_run']
            if parallel_run:
                try:
                    with Pool() as pool:
                        result = pool.map(load_one_image, val_array)
                # NOTE(review): if pool.map raises RuntimeError, `result` stays unbound and the
                # loop below raises NameError (caught by the outer except) — verify intent
                except RuntimeError as e:
                    fastdup_capture_exception("visualize_top_components", e)
            else:
                result = []
                # NOTE(review): this `i` shadows the outer component index, so the
                # 'Failed on component' message below may print an argument list instead
                for i in val_array:
                    img = load_one_image(i)
                    if img[0] is not None:
                        result.append(img)
            # collect successfully-loaded images plus their widths/heights
            for t,x in enumerate(result):
                if x[0] is not None:
                    if save_artifacts:
                        if not os.path.exists(f'{save_path}/images/comp_{counter}/'):
                            os.mkdir(f'{save_path}/images/comp_{counter}')
                        cv2.imwrite(f'{save_path}/images/comp_{counter}/{os.path.basename(files[t])}', x[0])
                    tmp_images.append(x[0])
                    w.append(x[1])
                    h.append(x[2])
            assert len(tmp_images),"Failed to read all images"
            # normalize all images in the grid to one size: mean size, or max size
            # when keep_aspect_ratio is requested (then images are padded, not resized)
            avg_h = int(np.mean(h))
            avg_w = int(np.mean(w))
            max_h = int(np.max(h))
            max_w = int(np.max(w))
            if keep_aspect_ratio:
                avg_h = max_h
                avg_w = max_w
            images = []
            for f in tmp_images:
                assert f is not None, "Failed to read image"
                if not keep_aspect_ratio:
                    f = cv2.resize(f, (avg_w,avg_h))
                else:
                    f = pad_image(f, avg_w, avg_h)
                images.append(f)
            labels = row['label'] if 'label' in row else None
            # small components are laid out in a single row (alternative_width)
            if len(images) <= 3:
                img, labels = generate_sprite_image(images, len(images), '', labels, h=avg_h, w=avg_w, alternative_width=len(images), max_width=max_width)
            else:
                img, labels = generate_sprite_image(images, len(images), '', labels, h=avg_h, w=avg_w, max_width=max_width)
            all_labels.append(labels)
            #all_files.append(files)
            if group_by == "label":
                local_file = os.path.join(save_dir, f'{subfolder}component_{counter}_{row["label"]}.jpg')
            else:
                local_file = os.path.join(save_dir, f'{subfolder}component_{counter}_{component_id}.jpg')
            fastdup_imwrite(local_file, img)
            img_paths.append(local_file)
            counter+=1
        except ModuleNotFoundError as ex:
            print('Your system is missing some dependencies please install then with pip install:')
            fastdup_capture_exception("visualize_top_components", ex)
        except Exception as ex:
            # best-effort: a failing component is logged and skipped, the loop continues
            print('Failed on component', i, ex)
            fastdup_capture_exception("visualize_top_components", ex)
    print(f'Finished OK. Components are stored as image files {save_path}/components_[index].jpg')
    if 'label' in top_components:
        # truncate label lists to match the number of images actually shown per grid
        top_components['label'] = top_components['label'].apply(lambda x: x[:MAX_IMAGES_IN_GRID])
    #top_components['files'] = all_files
    return top_components.head(num_components), img_paths
import os
# run Qt headless — galleries are rendered without a display server
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
# sanity-check the packaged native artifacts at import time; so_file and
# model_path_full are defined earlier in this module (not visible in this chunk)
if not os.path.exists(so_file):
    print("Failed to find shared object", so_file);
    print("Current init file is on", __file__);
    sys.exit(1)
if not os.path.exists(model_path_full):
    # a broken install is reported to telemetry before aborting
    fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
    print("Failed to find ort model on init " + __file__)
    sys.exit(1)
def clean_images(lazy_load, img_paths, section):
    """Delete temporary gallery image files once the report has been written.

    When lazy_load is True the html references the files on disk, so nothing
    is removed. Deletion failures are reported (and captured) but never raised.
    """
    if lazy_load:
        return
    for path in img_paths:
        try:
            if path is not None and os.path.exists(path):
                os.unlink(path)
        except Exception as ex:
            print("Failed to delete image file ", path, ex)
            fastdup_capture_exception(section, ex)
def calc_save_dir(save_path):
    """Return the directory that should hold report artifacts for save_path.

    A plain directory path is returned unchanged; for an .html file path the
    containing directory is returned, with '.' when there is no directory part.
    """
    if not save_path.endswith(".html"):
        return save_path
    parent = os.path.dirname(save_path)
    return parent if parent else "."
def get_bounding_box_func_helper(get_bounding_box_func):
    """Normalize the user-supplied bounding-box source into a gallery lookup.

    Accepted inputs:
      * None              -> returned unchanged (no bounding boxes drawn)
      * callable or dict  -> returned unchanged (already a per-filename lookup)
      * str               -> a csv file path, or a work_dir containing atrain_crops.csv
      * pd.DataFrame      -> used directly

    For the csv/dataframe cases the frame must contain the columns
    filename, row_y, col_x, width, height (crops as x, y, w, h).

    Returns:
        None, the callable/dict unchanged, or a dict mapping
        filename -> list of [x1, y1, x2, y2] boxes.
    Raises:
        AssertionError: on a missing file/folder, an empty frame, or missing columns.
    """
    if get_bounding_box_func is None:
        return None
    import pandas as pd
    if callable(get_bounding_box_func) or isinstance(get_bounding_box_func, dict):
        return get_bounding_box_func
    elif isinstance(get_bounding_box_func, str):
        if os.path.isfile(get_bounding_box_func):
            df = pd.read_csv(get_bounding_box_func)
        elif os.path.isdir(get_bounding_box_func):
            local_file = os.path.join(get_bounding_box_func, "atrain_crops.csv")
            assert os.path.exists(local_file), "Failed to find bounding box file in " + local_file
            # fix: read the path we just validated instead of joining it a second time
            df = pd.read_csv(local_file)
        else:
            assert False, "Failed to find input file/folder " + get_bounding_box_func
    elif isinstance(get_bounding_box_func, pd.DataFrame):
        df = get_bounding_box_func
    else:
        assert False, "get_bounding_box_func should be a callable function, a dictionary, a file with bounding box info or a dataframe"
    assert len(df), "Empty dataframe with bounding box information"
    for col in ("filename", "row_y", "col_x", "width", "height"):
        assert col in df.columns, f"Missing required bounding box column {col}"
    # fix: operate on a copy so a caller-owned dataframe is not mutated with a 'bbox' column
    df = df.copy()
    # convert (x, y, w, h) crops into [x1, y1, x2, y2] boxes
    df["bbox"] = df.apply(lambda x: [x["col_x"], x["row_y"], x["col_x"] + x["width"], x["row_y"] + x["height"]], axis=1)
    df = df.groupby('filename')['bbox'].apply(list).reset_index()
    return df.set_index('filename')['bbox'].to_dict()
def write_to_html_file(df, title='', filename='out.html', stats_info = None, subtitle=None, max_width=None,
                       write_row_name=True, jupyter_html=False):
    """Render dataframe df as a nicely formatted standalone HTML report at filename.

    The document is assembled from a header, one component section per dataframe
    row, and the matching footers. stats_info is currently unused.
    """
    work_dir = os.path.dirname(filename)
    chunks = [write_html_header(title, subtitle, max_width, jupyter_html)]
    chunks.append(write_component_header())
    for row_index, row in df.iterrows():
        chunks.append(write_component(df.columns, row, row_index, max_width, write_row_name))
    chunks.append(write_components_footer())
    chunks.append(write_html_footer())
    with open(filename, 'w') as handle:
        handle.write("".join(chunks))
    assert os.path.exists(filename), "Failed to write file " + filename
The provided code snippet includes necessary dependencies for implementing the `do_create_components_gallery` function. Write a Python function `def do_create_components_gallery(work_dir, save_path, num_images=20, lazy_load=False, get_label_func=None, group_by='visual', slice=None, max_width=None, max_items=None, min_items=None, get_bounding_box_func=None, get_reformat_filename_func=None, get_extra_col_func=None, threshold=None ,metric=None, descending=True, keyword=None, comp_type="component", input_dir=None, **kwargs)` to solve the following problem:
Function to create and display a gallery of images for the largest graph components Parameters: work_dir (str): path to fastdup work_dir. Alternatively (for advanced users): * pd.DataFrame containing the content of connected_components.csv file. The file columns should contain: __id,component_id,min_distance. * or pd.DataFrame containing the top components. The df should include the fields: component_id,files,distance,len. Where component_id is integer, files is a list of files in this component, files is a list of absoluate image filenames in the component, distance is float in the range 0..1, len the files len. save_path (str): output folder location for the visuals num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory. lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse curser is above the image (reduced html file size). get_label_func (callable): optional function given an absolute path to an image return the image label. Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels. Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file. group_by (str): [visual|label]. Group the report using the visual properties of the image or using the labels of the images. Default is visual. slice(str): optional label to draw only a subset of the components conforming to this label. Or a list of labels. max_width (int): optional parameter to control resulting html width. 
Default is None max_items (int): optional parameter to control the number of items displayed in statistics: top max_items labels (for group_by='visual') or top max_items components (for group_by='label'). Default is None namely show all items. min_items (int): optional parameter to select only components with at least min_items items. Default is None. get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image. The input is an absolute path to the image and the output is a list of bounding boxes. Each bounding box should be 4 integers: x1, y1, x2, y2. Example of valid bounding box list: [[0, 0, 100, 100]] Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename. Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists (callable): optional function to get bounding box of an image and add them to the report get_reformat_filename_func (callable): optional function to reformat the filename to be displayed in the report get_extra_col_func (callable): optional function to get extra column to be displayed in the report threshold (float): optional parameter to filter out components with distance below threshold. Default is None. metric (str): optional parameter to specify the metric used to choose the components. Default is None. descending (boolean): optional parameter to specify the order of the components. Default is True namely components are given from largest to smallest. keyword (str): optional parameter to select only components with a keyword as a substring in the label. Default is None. 
comp_type (str): optional parameter, default is "component" (for visualizing connected components) other option is "cluster" (for visualizing kmeans) input_dir (str): Optional parameter to specify the input directory of webdataset tar files, in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1' kwargs (dict): Optional parameter to pass additional parameters to the function. split_sentence_to_label_list (boolean): Optional parameter to split the label into a list of labels. Default is False. limit_labels_printed (int): Optional parameter to limit the number of labels printed in the html report. Default is max_items. nrows (int): limit the number of read rows for debugging purposes of the report
Here is the function:
def do_create_components_gallery(work_dir, save_path, num_images=20, lazy_load=False, get_label_func=None,
group_by='visual', slice=None, max_width=None, max_items=None, min_items=None,
get_bounding_box_func=None, get_reformat_filename_func=None, get_extra_col_func=None,
threshold=None ,metric=None, descending=True, keyword=None, comp_type="component", input_dir=None,
**kwargs):
'''
Function to create and display a gallery of images for the largest graph components
Parameters:
work_dir (str): path to fastdup work_dir. Alternatively (for advanced users):
* pd.DataFrame containing the content of connected_components.csv file. The file columns should contain: __id,component_id,min_distance.
* or pd.DataFrame containing the top components. The df should include the fields: component_id,files,distance,len. Where component_id is integer, files is a list of files
in this component, files is a list of absoluate image filenames in the component, distance is float in the range 0..1, len the files len.
save_path (str): output folder location for the visuals
num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory.
lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse curser is above the image (reduced html file size).
get_label_func (callable): optional function given an absolute path to an image return the image label.
Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels.
Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file.
group_by (str): [visual|label]. Group the report using the visual properties of the image or using the labels of the images. Default is visual.
slice(str): optional label to draw only a subset of the components conforming to this label. Or a list of labels.
max_width (int): optional parameter to control resulting html width. Default is None
max_items (int): optional parameter to control th number of items displayed in statistics: top max_items labels (for group_by='visual')
or top max_items components (for group_by='label'). Default is None namely show all items.
min_items (int): optional parameter to select only components with at least min_items items. Default is None.
get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image.
The input is an absolute path to the image and the output is a list of bounding boxes.
Each bounding box should be 4 integers: x1, y1, x2, y2. Example of valid bounding box list: [[0, 0, 100, 100]]
Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename.
Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists (callable): optional function to get bounding box of an image and add them to the report
get_reformat_filename_func (callable): optional function to reformat the filename to be displayed in the report
get_extra_col_func (callable): optional function to get extra column to be displayed in the report
threshold (float): optional parameter to filter out components with distance below threshold. Default is None.
metric (str): optional parameter to specify the metric used to chose the components. Default is None.
descending (boolean): optional parameter to specify the order of the components. Default is True namely components are given from largest to smallest.
keyword (str): optional parameter to select only components with a keyword as a substring in the label. Default is None.
comp_type (str): optional parameter, default is "component" (for visualizing connected components) other option is "cluster" (for visualizing kmeans)
input_dir (str): Optional parameter to specify the input directory of webdataset tar files,
in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1'
kwargs (dict): Optional parameter to pass additional parameters to the function.
split_sentence_to_label_list (boolean): Optional parameter to split the label into a list of labels. Default is False.
limit_labels_printed (int): Optional parameter to limit the number of labels printed in the html report. Default is max_items.
nrows (int): limit the number of read rows for debugging purposes of the report
'''
start = time.time()
#v1 = 'id_to_filename_func' in kwargs
if num_images > 1000 and not lazy_load:
print("Warning: When plotting more than 1000 images, please run with lazy_load=True. Chrome and Safari support lazy loading of web images, otherwise the webpage gets too big")
assert num_images >= 1, "Please select one or more images"
assert group_by == 'label' or group_by == 'visual', "Allowed values for group_by=[visual|label], got " + group_by
if group_by == 'label':
assert get_label_func is not None, "missing get_label_func, when grouping by labels need to set get_label_func"
assert comp_type in ['component','cluster']
num_items_title = 'num_images' if 'is_video' not in kwargs else 'num_videos'
if isinstance(work_dir, pd.DataFrame):
run_hierarchical = False
else:
run_hierarchical = (work_dir.endswith("csv") and "hierarchical" in work_dir) or \
(kwargs.get('run_hierarchical', False))
get_bounding_box_func = get_bounding_box_func_helper(get_bounding_box_func)
kwargs['lazy_load'] = lazy_load
kwargs['run_hierarchical'] = run_hierarchical
if 'selection_strategy' not in kwargs:
kwargs['selection_strategy'] = SELECTION_STRATEGY_FIRST
else:
assert isinstance(kwargs['selection_strategy'],int) and kwargs['selection_strategy'] >= 0 and kwargs['selection_strategy'] <= 2
save_dir = calc_save_dir(save_path)
ret = visualize_top_components(work_dir, save_dir, num_images,
get_label_func, group_by, slice,
get_bounding_box_func, max_width, threshold, metric,
descending, max_items, min_items, keyword,
comp_type=comp_type, input_dir=input_dir, kwargs=kwargs)
if ret is None:
return None
subdf, img_paths = ret
if subdf is None or len(img_paths) == 0:
return None
assert len(subdf) == len(img_paths), "Number of components and number of images do not match"
import fastdup.html_writer
save_artifacts= 'save_artifacts' in kwargs and kwargs['save_artifacts']
if save_artifacts:
subdf.to_csv(f'{save_dir}/components.csv')
comp_col = "component_id" if comp_type == "component" else "cluster"
cols_dict = {comp_col:subdf[comp_col].values,'files':subdf['files'].values,
num_items_title:subdf['len'].apply(lambda x: "{:,}".format(x)).values}
if 'distance' in subdf.columns:
cols_dict['distance'] = subdf['distance'].values
if 'label' in subdf.columns:
cols_dict['label'] = subdf['label'].values
elif 'is_video' in kwargs:
cols_dict['num_images'] = subdf['orig_len'].apply(lambda x: "{:,}".format(x)).values
subdf['label'] = subdf['video']
if metric in subdf.columns:
cols_dict[metric] = subdf[metric].apply(lambda x: round(np.mean(x),2)).values
ret2 = pd.DataFrame(cols_dict)
info_list = []
counter =0
for i,row in ret2.iterrows():
if group_by == 'visual':
comp = row[comp_col]
num = row[num_items_title]
dict_rows = {'component':[comp], num_items_title :[num]}
if 'distance' in row:
dist = row['distance']
dict_rows['mean_distance'] = [np.mean(dist)]
if metric is not None:
dict_rows[metric] = [row[metric]]
if kwargs and 'is_video' in kwargs:
dict_rows['num_images'] = row['num_images']
info_df = pd.DataFrame(dict_rows).T
info_list.append(info_df)
elif group_by == 'label':
label = row['label']
num = row[num_items_title]
dict_rows = {'label':[label], num_items_title :[num]}
if 'distance' in row:
dist = row['distance']
dict_rows['mean_distance'] = [np.mean(dist)]
if metric is not None:
dict_rows[metric] = [row[metric]]
info_df = pd.DataFrame(dict_rows).T
info_list.append(info_df)
#if save_artifacts:
# info_df.to_csv(f'{save_dir}/component_{counter}_df.csv')
counter += 1
ret = pd.DataFrame({'info': info_list})
if 'label' in subdf.columns:
if group_by == 'visual':
labels_table = []
counter = 0
for i,row in subdf.iterrows():
labels = list(row['label'])
#if save_artifacts:
# pd.DataFrame({'label':labels,'files':list(row['files'])}).to_csv(os.path.join(save_dir, f"component_{counter}_labels.csv", index=False))
if callable(get_reformat_filename_func) and 'is_video' in kwargs:
labels = [get_reformat_filename_func(x) for x in labels]
unique, counts = np.unique(np.array(labels), return_counts=True)
lencount = len(counts)
if max_items is not None and max_items < lencount:
lencount = max_items
if 'limit_labels_printed' in kwargs:
lencount = kwargs['limit_labels_printed']
counts_df = pd.DataFrame({"counts":counts}, index=unique).sort_values('counts', ascending=False)
if save_artifacts:
counts_df.to_csv(f'{save_dir}/counts_{counter}.csv')
counts_df = counts_df.head(lencount)#.reset_index().rename({'index': 'label'}, axis=1)
counts_df.index.names = ['label']
# counts_df = counts_df
labels_table.append(counts_df)
counter+=1
ret.insert(0, 'label', labels_table)
else:
comp_table = []
counter = 0
for i,row in subdf.iterrows():
unique, counts = np.unique(np.array(row[comp_col]), return_counts=True)
lencount = len(counts)
if max_items is not None and max_items < lencount:
lencount = max_items;
if kwargs is not None and 'limit_labels_printed' in kwargs:
lencount = kwargs['limit_labels_printed']
counts_df = pd.DataFrame({"counts":counts}, index=unique).sort_values('counts', ascending=False)
#if save_artifacts:
# counts_df.to_csv(f'{save_dir}/counts_{counter}.csv')
counts_df = counts_df.head(lencount)
comp_table.append(counts_df)
counter+=1
ret.insert(0, 'components', comp_table)
img_html = format_image_html_string(img_paths, lazy_load, max_width, save_path)
ret.insert(0, 'image', img_html)
if str(save_path).endswith('.html'):
out_file = save_path
else:
out_file = os.path.join(save_dir, "components_hierarchical.html") if run_hierarchical else os.path.join(save_dir, 'components.html')
columns = ['info','image']
if 'label' in subdf.columns:
if group_by == 'visual':
columns.append('label')
elif group_by == 'label':
columns.append('components')
if comp_type == "component":
if 'is_video' in kwargs:
title = 'Video Components Report'
subtitle = "Showing groups of similar videos"
elif run_hierarchical:
title = 'Hierarchical Components Report'
subtitle = "Showing hierarchical groups of similar images"
else:
title = 'Components Report'
subtitle = "Showing groups of similar images"
else:
title = "KMeans Cluster Report"
subtitle = "Showing groups of similar images"
if slice is not None:
if slice == "diff":
subtitle += ", from different classes"
elif slice =="same":
subtitle += ", from the same class"
else:
subtitle += ", for label: " + str(slice)
if metric is not None:
subtitle = ", Sorted by " + metric + " descending" if descending else "Sorted by " + metric + " ascending"
ret = ret[['image','info', 'label']] if 'label' in ret.columns else ret[['image','info']]
if callable(get_extra_col_func):
ret['files'] = subdf['files'].values#.apply(lambda x: [get_extra_col_func(y) for y in x])
fastdup.html_writer.write_to_html_file(ret, title, out_file, None, subtitle, max_width,
jupyter_html=kwargs.get('jupyter_html', False))
assert os.path.exists(out_file), "Failed to generate out file " + out_file
if comp_type == "component":
print_success_msg('components', out_file, lazy_load)
else:
print_success_msg("kmeans clusters", out_file, lazy_load)
clean_images(lazy_load or save_artifacts or (threshold is not None), img_paths, "create_components_gallery")
print('Execution time in seconds', round(time.time() - start, 1))
return 0 | Function to create and display a gallery of images for the largest graph components Parameters: work_dir (str): path to fastdup work_dir. Alternatively (for advanced users): * pd.DataFrame containing the content of connected_components.csv file. The file columns should contain: __id,component_id,min_distance. * or pd.DataFrame containing the top components. The df should include the fields: component_id,files,distance,len. Where component_id is integer, files is a list of files in this component, files is a list of absoluate image filenames in the component, distance is float in the range 0..1, len the files len. save_path (str): output folder location for the visuals num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory. lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse curser is above the image (reduced html file size). get_label_func (callable): optional function given an absolute path to an image return the image label. Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels. Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file. group_by (str): [visual|label]. Group the report using the visual properties of the image or using the labels of the images. Default is visual. slice(str): optional label to draw only a subset of the components conforming to this label. Or a list of labels. max_width (int): optional parameter to control resulting html width. 
Default is None max_items (int): optional parameter to control th number of items displayed in statistics: top max_items labels (for group_by='visual') or top max_items components (for group_by='label'). Default is None namely show all items. min_items (int): optional parameter to select only components with at least min_items items. Default is None. get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image. The input is an absolute path to the image and the output is a list of bounding boxes. Each bounding box should be 4 integers: x1, y1, x2, y2. Example of valid bounding box list: [[0, 0, 100, 100]] Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename. Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists (callable): optional function to get bounding box of an image and add them to the report get_reformat_filename_func (callable): optional function to reformat the filename to be displayed in the report get_extra_col_func (callable): optional function to get extra column to be displayed in the report threshold (float): optional parameter to filter out components with distance below threshold. Default is None. metric (str): optional parameter to specify the metric used to chose the components. Default is None. descending (boolean): optional parameter to specify the order of the components. Default is True namely components are given from largest to smallest. keyword (str): optional parameter to select only components with a keyword as a substring in the label. Default is None. 
comp_type (str): optional parameter, default is "component" (for visualizing connected components) other option is "cluster" (for visualizing kmeans) input_dir (str): Optional parameter to specify the input directory of webdataset tar files, in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1' kwargs (dict): Optional parameter to pass additional parameters to the function. split_sentence_to_label_list (boolean): Optional parameter to split the label into a list of labels. Default is False. limit_labels_printed (int): Optional parameter to limit the number of labels printed in the html report. Default is max_items. nrows (int): limit the number of read rows for debugging purposes of the report |
6,501 | import os
import pandas as pd
import cv2
import time
import numpy as np
import traceback
import shutil
import pathlib
from fastdup.image import plot_bounding_box, my_resize, get_type, imageformatter, create_triplet_img, fastdup_imread, calc_image_path, clean_images, pad_image, enhance_image, fastdup_imwrite
from fastdup.definitions import *
import re
from multiprocessing import Pool
from fastdup.sentry import *
from fastdup.utils import load_filenames, merge_with_filenames, get_bounding_box_func_helper, load_stats, load_labels, sample_from_components, calc_save_dir, convert_v1_to_v02
try:
from tqdm.auto import tqdm
except:
tqdm = (lambda x, total=None, desc=None: x)
def print_success_msg(report_name, out_file, lazy_load):
    """Print a confirmation that an HTML report was generated.

    Parameters:
        report_name (str): human-readable report name (e.g. 'components').
        out_file (str): path of the generated html file.
        lazy_load (bool): when True the html references images on disk, so warn
            the user to ship the 'images' and 'assets' subfolders along with it.
    """
    print(f"Stored {report_name} visual view in ", out_file)
    if lazy_load:
        # Fixed typo in the user-facing message: "subolders" -> "subfolders".
        print("Note: when using lazy_load=True, the images are relative to the location of the html file. When sharing the report please make"
              " sure to include also subfolders images & assets.")
def format_image_html_string(img_paths, lazy_load, max_width, save_path=None):
    """Build the html <img> snippets for a list of image paths.

    With lazy_load the tags reference files on disk and let the browser load
    them lazily; otherwise every image is embedded inline via imageformatter.
    """
    if lazy_load:
        tags = []
        for path in img_paths:
            tags.append("<img src=\"" + shorten_image(path, save_path) + "\" loading=\"lazy\">")
        return tags
    return [imageformatter(path, max_width) for path in img_paths]
def swap_dataframe(subdf, cols):
    """Transpose each row of the selected columns into its own one-column DataFrame.

    Columns named 'image' / 'similar' (case-insensitive) or starting with
    'info' are dropped; the remaining per-row frames become the textual 'info'
    cells of the html report.
    """
    def _keep(name):
        return name.lower() != 'image' and not name.startswith('info') and name.lower() != 'similar'

    kept_cols = [c for c in cols if _keep(c)]
    return [pd.DataFrame(row) for _, row in subdf[kept_cols].iterrows()]
def find_label(get_label_func, df, in_col, out_col, vqa_prompt: str = None, kwargs=None):
    """Populate df[out_col] with labels derived from get_label_func.

    get_label_func may be:
      * a path to a labels file on disk (one label per row, same order as df),
      * a column name already present in df,
      * the name of a supported captioning / VQA / age model,
      * a dict mapping absolute filename -> label,
      * a callable mapping absolute filename -> label.
    The dataframe is modified in place and also returned.
    """
    if (get_label_func is not None):
        if isinstance(get_label_func, str):
            if os.path.exists(get_label_func):
                # Labels file on disk: must align 1:1 with the dataframe rows.
                df_labels = load_labels(get_label_func, kwargs)
                assert len(df_labels) == len(df), f"Error: wrong length of labels file {get_label_func} expected {len(df)} got {len(df_labels)}"
                df[out_col] = df_labels['label']
            elif get_label_func in df.columns:
                # NOTE(review): copies the 'label' column regardless of which
                # column name was given -- only correct when
                # get_label_func == 'label'; confirm for other column names.
                df[out_col] = df['label']
            elif get_label_func in CAPTION_MODEL_NAMES:
                # Generate captions with a local model (deferred import: heavy deps).
                from fastdup.captions import generate_labels
                df[out_col] = generate_labels(df[in_col], get_label_func, device='cpu')
            elif get_label_func == VQA_MODEL1_NAME:
                from fastdup.captions import generate_vqa_labels
                df[out_col] = generate_vqa_labels(df[in_col], vqa_prompt, kwargs)
            elif get_label_func == AGE_LABEL1_NAME:
                from fastdup.captions import generate_age_labels
                df[out_col] = generate_age_labels(df[in_col], kwargs)
            else:
                assert False, f"Found str label {get_label_func} but it is neither a file nor a column name in the dataframe {df.columns}"
        elif isinstance(get_label_func, dict):
            # Filenames missing from the dict fall back to the MISSING_LABEL sentinel.
            df[out_col] = df[in_col].apply(lambda x: get_label_func.get(x, MISSING_LABEL))
        elif callable(get_label_func):
            assert len(df), "Empty dataframe"
            assert in_col in df.columns, f"Missing column {in_col}"
            df[out_col] = df[in_col].apply(lambda x: get_label_func(x))
        else:
            assert False, f"Failed to understand get_label_func type {type(get_label_func)}"
    # Debug aid: show the first labeled rows when requested via kwargs.
    if kwargs is not None and 'debug_labels' in kwargs:
        print(df.head())
    return df
def slice_df(df, slice, colname, kwargs=None):
    """Filter df down to the rows whose df[colname] label matches `slice`.

    `slice` may be a single label (str) or a list of labels; the labels stored
    in df[colname] may themselves be strings or lists of strings.  Behavior
    flags read from kwargs: split_sentence_to_label_list, debug_labels, grouped.
    Returns the filtered dataframe (may assert when nothing matches).
    """
    assert len(df), "Df has no rows"
    split_sentence_to_label_list = kwargs is not None and 'split_sentence_to_label_list' in kwargs and kwargs['split_sentence_to_label_list']
    debug_labels = kwargs is not None and 'debug_labels' in kwargs and kwargs['debug_labels']
    grouped = kwargs is not None and 'grouped' in kwargs and kwargs['grouped']
    if slice is not None:
        if isinstance(slice, str):
            # cover the case labels are string or lists of strings
            if split_sentence_to_label_list:
                labels = df[colname].astype(str).apply(lambda x: split_str(x.lower())).values
                if debug_labels:
                    print('Label with split sentence', labels[:10])
            else:
                labels = df[colname].astype(str).values
                if debug_labels:
                    print('label without split sentence', labels[:10])
            is_list = isinstance(labels[0], list)
            if grouped:
                # Grouped report: keep rows whose label collection contains the slice.
                df = df[df[colname].apply(lambda x: slice in x)]
                assert len(df), f"Failed to find any labels with value={slice}"
            elif is_list:
                # Flatten only for the debug printout; filtering is done per row.
                labels = [item for sublist in labels for item in sublist]
                if debug_labels:
                    print('labels after merging sublists', labels[:10])
                # Case-insensitive membership over each row's label list.
                df = df[df[colname].apply(lambda x: slice in [y.lower() for y in x])]
            else:
                # Exact match first; fall back to substring match when nothing found.
                df2 = df[df[colname] == slice]
                if len(df2) == 0:
                    df2 = df[df[colname].apply(lambda x: slice in str(x))]
                df = df2
        elif isinstance(slice, list):
            if isinstance(df[colname].values[0], list):
                # Keep rows whose label list intersects the requested slice list.
                df = df[df[colname].apply(lambda x: len(set(x)&set(slice)) > 0)]
            else:
                df = df[df[colname].isin(slice)]
            assert len(df), f"Failed to find any labels with {slice}"
        else:
            assert False, "slice must be a string or a list of strings"
    return df
def lookup_filename(filename, work_dir):
    """Resolve a possibly-relative image filename to a usable path.

    Existing paths are returned unchanged; paths located under the s3 temp
    folders are re-rooted below work_dir.
    """
    assert isinstance(filename, str), f"Wrong for type (unknown) {type(filename)}"
    if os.path.exists(filename):
        return filename
    sep = get_sep()
    in_remote_temp = filename.startswith(S3_TEMP_FOLDER + sep) or filename.startswith(S3_TEST_TEMP_FOLDER + sep)
    if in_remote_temp:
        assert work_dir is not None, f"Failed to find work_dir on remote_fs: filename was (unknown)"
        return os.path.join(work_dir, filename)
    return filename
# Module init: force Qt/OpenCV into headless mode and publish the local
# working directory for the native library before it is loaded.
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
# Fail fast with a clear message when the native artifacts that ship with the
# package are missing (broken install).
if not os.path.exists(so_file):
    print("Failed to find shared object", so_file);
    print("Current init file is on", __file__);
    sys.exit(1)
if not os.path.exists(model_path_full):
    # Report the broken install to telemetry before exiting.
    fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
    print("Failed to find ort model on init " + __file__)
    sys.exit(1)
def calc_image_path(lazy_load, save_path, filename, filename_suffix=''):
    """Compute the destination path for a report image.

    With lazy_load images go under an 'images' subfolder (created on demand);
    otherwise they land directly in save_path.  Unrecognized extensions get a
    '.jpg' appended so cv2 can pick an encoder, and over-long paths are
    replaced with a short random name in the same folder.
    """
    if lazy_load:
        folder = os.path.join(save_path, "images")
        os.makedirs(folder, exist_ok=True)
    else:
        folder = os.path.join(save_path)
    candidate = os.path.join(folder, safe_replace(filename))
    stem, ext = os.path.splitext(candidate)
    if ext not in (None, '') and ext.lower() not in SUPPORTED_IMG_FORMATS:
        ext += ".jpg"
    imgpath = stem + filename_suffix + ext
    if len(imgpath) > 255:
        # Path too long for the filesystem: fall back to a short random stem.
        random_stem = next(tempfile._get_candidate_names())
        imgpath = os.path.join(folder, random_stem + filename_suffix + ext)
    return imgpath
def clean_images(lazy_load, img_paths, section):
    """Delete temporary per-image files unless lazy_load keeps them on disk.

    Deletion failures are logged and reported but never raised, so report
    generation always completes.
    """
    if lazy_load:
        return
    for path in img_paths:
        try:
            if path is not None and os.path.exists(path):
                os.unlink(path)
        except Exception as ex:
            print("Failed to delete image file ", path, ex)
            fastdup_capture_exception(section, ex)
def fastdup_imread(img1_path, input_dir, kwargs):
    """
    Read an image from local file, or from a tar file, or from s3/minio path using minio client mc
    Parameters:
        img1_path (str): path to the image
        input_dir (str): optional directory path in case the image is found on a webdataset in another path or found in s3
        kwargs (dict): optional flags; only 'reformat_tar_name' (callable) is read here
    Returns:
        img1 (np.array): the image, or None when every lookup strategy failed
    """
    assert not pd.isnull(img1_path), f"img1_path should not be None {img1_path} {input_dir}, {kwargs}"
    is_minio_or_s3 = False
    # Normalize input_dir: expand '~' and decide whether it points at remote storage.
    if input_dir is not None and (isinstance(input_dir, str) or isinstance(input_dir, pathlib.Path)):
        if input_dir.startswith('~/'):
            input_dir = os.path.expanduser(input_dir)
        if not input_dir.startswith("s3://") and not input_dir.startswith("minio://"):
            assert os.path.exists(input_dir), "Failed to find input_dir: " + input_dir
        else:
            is_minio_or_s3 = True
    if img1_path.startswith('~/'):
        img1_path = os.path.expanduser(img1_path)
    # Strategy 1: plain local file.
    if os.path.exists(img1_path):
        img = inner_read(img1_path)
        return img
    # Strategy 2: image extracted from a webdataset tar (path contains the s3
    # temp folder and a '.tar/' segment); re-derive the tar path under input_dir.
    elif ('/' +S3_TEMP_FOLDER + '/' in img1_path or '/' + S3_TEST_TEMP_FOLDER + '/' in img1_path) and \
        '.tar/' in img1_path:
        assert os.path.exists(input_dir), "Failed to find input dir " + input_dir
        pos = os.path.dirname(img1_path).find(input_dir.replace('/',''))
        tar_file = os.path.dirname(img1_path)[pos+len(input_dir.replace('/','')):]
        tar_file = os.path.join(input_dir, tar_file)
        if kwargs is not None and "reformat_tar_name" in kwargs and callable(kwargs['reformat_tar_name']):
            tar_file = kwargs["reformat_tar_name"](tar_file)
            print('Found tar file', tar_file)
        img_name = os.path.basename(img1_path)
        try:
            with tarfile.open(tar_file, "r") as tar:
                f = tar.extractfile(img_name)
                return cv2.imdecode(np.frombuffer(f.read(), np.uint8), cv2.IMREAD_COLOR)
        except Exception as ex:
            fastdup_capture_exception("fastdup_imread", ex)
            print("Error reading from tar file: ", tar_file, ex)
            return None
    # Strategy 3: download from minio/s3 into the local temp folder, then read.
    elif is_minio_or_s3 and input_dir is not None:
        if input_dir.startswith("minio://"):
            local_dir_no_temp = truncate_folder_name(os.path.dirname(img1_path))
            minio_prefix = "/".join(input_dir.replace("minio://", "").split('/')[:2])
            #print('minio_prefix', minio_prefix)
            download_minio(minio_prefix + '/' + local_dir_no_temp + '/' + os.path.basename(img1_path), S3_TEMP_FOLDER)
            ret = inner_read(os.path.join(S3_TEMP_FOLDER, os.path.basename(img1_path)))
            assert ret is not None, f"Failed to read image {os.path.join(S3_TEMP_FOLDER, os.path.basename(img1_path))}"
            return ret
        elif input_dir.startswith("s3://"):
            local_dir_no_temp = truncate_folder_name(os.path.dirname(img1_path))
            s3_prefix = 's3://' + "/".join(input_dir.replace("s3://", "").split('/')[:1])
            #print('s3_prefix', s3_prefix)
            download_s3(s3_prefix + '/' + local_dir_no_temp + '/' + os.path.basename(img1_path), S3_TEMP_FOLDER)
            ret = inner_read(os.path.join(S3_TEMP_FOLDER, os.path.basename(img1_path)))
            return ret
    #Failed to read image1 ..\milvus_vector_db\data\images\..\milvus_vector_db\data\images\Egyptian_Mau_210.jpg
    # Strategy 4: the path may have input_dir duplicated as a prefix; try the suffix.
    elif input_dir is not None and img1_path.startswith(input_dir) and len(img1_path) >= len(input_dir) +2:
        suffix = img1_path[len(input_dir):]
        if input_dir in suffix and os.path.exists(suffix):
            img = inner_read(suffix)
            return img
    elif "''" in img1_path: # try to handle french and other languages where c side doubles the '' otherwise pandas can't read it
        new_img1_path = img1_path.replace("''","")
        if os.path.exists(new_img1_path):
            img = inner_read(new_img1_path)
            return img
    print('Failed to read image from img_path', img1_path)
    return None
def fastdup_imwrite(local_file, im):
    """Save image `im` to local_file, working around missing file extensions
    and filesystem path-length limits.

    Raises AssertionError when the image could not be written.
    """
    has_extension = check_valid_image_extension(local_file)
    if has_extension:
        ret = cv2.imwrite(local_file, im)
    else:
        # cv2.imwrite picks its encoder from the extension: write with a
        # temporary .jpg suffix, then rename to the requested name.
        local_file_wext = local_file + '.jpg'
        ret = cv2.imwrite(local_file_wext, im)
        assert ret, f"Failed to save img to {local_file} most likely filename is too long for the OS"
        # Rename back if extension was added
        os.rename(local_file_wext, local_file)
        assert os.path.isfile(local_file), "Failed to save img to " + local_file
    # NOTE(review): this long-filename retry can only trigger on the
    # has_extension branch -- the extensionless branch already asserted on
    # failure above; confirm that asymmetry is intended.
    if ret == False and len(local_file) >= 254:
        try:
            import uuid
            import shutil
            # Write under a short random name, then move into place to dodge
            # per-component filename length limits.
            file, ext = os.path.splitext(local_file)
            tmp_filename = str(uuid.uuid4()) + ext
            ret = cv2.imwrite(tmp_filename, im)
            if os.path.exists(local_file):
                os.unlink(local_file)
            shutil.move(tmp_filename, local_file)
        finally:
            assert ret, f"Failed to save img to {local_file} most likely filename is too long for the OS"
    elif ret == False:
        assert ret, f"Failed to save img to {local_file}"
    assert os.path.isfile(local_file), "Failed to save img to " + local_file
def my_resize(img, max_width):
    """Downscale img for display.

    The target size is 320 unless max_width is given and the image is wider,
    in which case max_width is used.  Returns the (possibly resized) image, or
    None for a None input.
    """
    if img is None:
        return None
    h, w, c = get_shape(img)
    target = max_width if (max_width is not None and w > max_width) else 320
    aspect = h / w
    new_w = int(target / aspect)
    if (h > target or w > target) and aspect > 0 and new_w > 0 and target > 0:
        img = cv2.resize(img, (new_w, target))
    return img
def plot_bounding_box(img, get_bounding_box_func, filename):
    """Draw the bounding boxes reported for filename onto img (green, 3px).

    get_bounding_box_func is either a callable mapping filename -> list of
    [x1, y1, x2, y2] boxes, or a dict with the same mapping; anything else
    draws nothing.  Returns the annotated image.
    """
    if callable(get_bounding_box_func):
        boxes = get_bounding_box_func(filename)
    elif isinstance(get_bounding_box_func, dict):
        boxes = get_bounding_box_func.get(filename, [])
    else:
        boxes = []
    for box in boxes:
        coords = [int(v) for v in box]
        img = cv2.rectangle(img, (coords[0], coords[1]), (coords[2], coords[3]), (0, 255, 0), 3)
    return img
def calc_save_dir(save_path):
    """Return the directory that report artifacts should be written to.

    A directory path is returned unchanged; a '*.html' path is reduced to its
    parent directory ('.' when the file has no directory component).
    """
    if not save_path.endswith(".html"):
        return save_path
    parent = os.path.dirname(save_path)
    return parent if parent != "" else "."
def get_bounding_box_func_helper(get_bounding_box_func):
    """Normalize the many accepted bounding-box inputs into one lookup form.

    Accepts None, a callable, a dict mapping filename -> list of boxes, a csv
    file path, a work_dir containing atrain_crops.csv, or a DataFrame with
    columns filename,col_x,row_y,width,height.

    Returns:
        None, the callable / dict unchanged, or a dict
        {filename: [[x1, y1, x2, y2], ...]} built from the crops table.
    """
    if get_bounding_box_func is None:
        return None
    import pandas as pd
    if callable(get_bounding_box_func) or isinstance(get_bounding_box_func, dict):
        return get_bounding_box_func
    elif isinstance(get_bounding_box_func, str):
        if os.path.isfile(get_bounding_box_func):
            df = pd.read_csv(get_bounding_box_func)
        elif os.path.isdir(get_bounding_box_func):
            local_file = os.path.join(get_bounding_box_func, "atrain_crops.csv")
            assert os.path.exists(local_file), "Failed to find bounding box file in " + local_file
            # Reuse the path computed above instead of re-joining it.
            df = pd.read_csv(local_file)
        else:
            assert False, "Failed to find input file/folder " + get_bounding_box_func
    elif isinstance(get_bounding_box_func, pd.DataFrame):
        df = get_bounding_box_func
    else:
        assert False, "get_bounding_box_func should be a callable function, a dictionary, a file with bounding box info or a dataframe"
    assert len(df), "Empty dataframe with bounding box information"
    for col in ("filename", "row_y", "col_x", "width", "height"):
        assert col in df.columns, f"Missing required column {col} in bounding box dataframe"
    # The crops table stores top-left corner plus width/height; reports need
    # [x1, y1, x2, y2] corner pairs, grouped per filename.
    df["bbox"] = df.apply(lambda x: [x["col_x"], x["row_y"], x["col_x"] + x["width"], x["row_y"] + x["height"]], axis=1)
    df = df.groupby('filename')['bbox'].apply(list).reset_index()
    my_dict = df.set_index('filename')['bbox'].to_dict()
    return my_dict
def write_to_html_file(df, title='', filename='out.html', stats_info = None, subtitle=None, max_width=None,
                       write_row_name=True, jupyter_html=False):
    """Write an entire dataframe to an HTML file with nice formatting.

    Parameters:
        df (pd.DataFrame): rows to render; each row becomes one report component.
        title (str): page title.
        filename (str): output html path.
        stats_info: unused; kept for backward compatibility with older callers.
        subtitle (str): optional page subtitle.
        max_width (int): optional max image width forwarded to the renderer.
        write_row_name (bool): whether to print the row index next to each component.
        jupyter_html (bool): render a header suitable for inline jupyter display.
    """
    # Collect fragments and join once instead of repeated string concatenation.
    parts = [write_html_header(title, subtitle, max_width, jupyter_html),
             write_component_header()]
    for i, row in df.iterrows():
        parts.append(write_component(df.columns, row, i, max_width, write_row_name))
    parts.append(write_components_footer())
    parts.append(write_html_footer())
    with open(filename, 'w') as f:
        f.write("".join(parts))
    assert os.path.exists(filename), "Failed to write file " + filename
The provided code snippet includes necessary dependencies for implementing the `do_create_stats_gallery` function. Write a Python function `def do_create_stats_gallery(stats_file, save_path, num_images=20, lazy_load=False, get_label_func=None, metric='blur', slice=None, max_width=None, descending=False, get_bounding_box_func=None, get_reformat_filename_func=None, get_extra_col_func=None, input_dir=None, work_dir=None, **kwargs)` to solve the following problem:
Function to create and display a gallery of images computed by the outliers metrics. Note that fastdup generates a histogram of all the encountered values for the specific metric. The red dashed line on this plot corresponds to the number of images displayed in the report. For example, assume you have unique image values between 30-250, and the report displays 100 images with values between 30-50. We plot a red line on the value 50. Parameters: stats_file (str): csv file with the computed image statistics by the fastdup tool. Alternatively, a pandas dataframe can be passed in directly with the stats computed by fastdup. save_path (str): output folder location for the visuals num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory. lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when the mouse cursor is above the image (reduced html file size). get_label_func (callable): optional function that, given an absolute path to an image, returns the image label. Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels. Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file. metric (str): Optional metric selection. One of blur, size, mean, min, max, unique, stdv. Default is blur. slice (str or list): Optional parameter to select a slice of the outliers file based on a specific label or a list of labels. 
max_width (int): Optional param to limit the image width descending (bool): Optional param to control the order of the metric get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image. The input is an absolute path to the image and the output is a list of bounding boxes. Each bounding box should be 4 integers: x1, y1, x2, y2. Example of valid bounding box list: [[0, 0, 100, 100]] Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename. Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists get_reformat_filename_func (callable): Optional parameter to allow reformatting the image file name. This is a function the user implements that gets the full file path and returns a new file name. get_extra_col_func (callable): Optional parameter to allow adding extra column to the report. input_dir (str): Optional parameter to specify the input directory of webdataset tar files, in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1'
Here is the function:
def do_create_stats_gallery(stats_file, save_path, num_images=20, lazy_load=False, get_label_func=None,
                            metric='blur', slice=None, max_width=None, descending=False, get_bounding_box_func=None,
                            get_reformat_filename_func=None, get_extra_col_func=None, input_dir=None, work_dir=None,
                            **kwargs):
    '''
    Function to create and display a gallery of images computed by the outliers metrics.

    Note that fastdup generates a histogram of all the encountered values for the specific metric. The red dashed line on this plot corresponds to the number of images displayed in the report.
    For example, assume you have unique image values between 30-250, and the report displays 100 images with values between 30-50. We plot a red line on the value 50.

    Parameters:
        stats_file (str): csv file with the computed image statistics by the fastdup tool. Alternatively, a pandas dataframe can be passed in directly with the stats computed by fastdup.
        save_path (str): output folder location for the visuals
        num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory.
        lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when the mouse cursor is above the image (reduced html file size).
        get_label_func (callable): optional function that, given an absolute path to an image, returns the image label.
            Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels.
            Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file.
        metric (str): Optional metric selection. One of blur, size, mean, min, max, unique, stdv. Default is blur.
        slice (str or list): Optional parameter to select a slice of the outliers file based on a specific label or a list of labels.
        max_width (int): Optional param to limit the image width
        descending (bool): Optional param to control the order of the metric
        get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image.
            The input is an absolute path to the image and the output is a list of bounding boxes.
            Each bounding box should be 4 integers: x1, y1, x2, y2. Example of valid bounding box list: [[0, 0, 100, 100]]
            Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename.
            Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists
        get_reformat_filename_func (callable): Optional parameter to allow reformatting the image file name. This is a function the user implements that gets the full file path and returns a new file name.
        get_extra_col_func (callable): Optional parameter to allow adding extra column to the report.
        input_dir (str): Optional parameter to specify the input directory of webdataset tar files,
            in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1'
    '''
    img_paths = []
    get_bounding_box_func = get_bounding_box_func_helper(get_bounding_box_func)
    df = stats_file
    if metric is not None and metric == 'size':
        df['size'] = df['width'] * df['height']
    assert metric in df.columns, "Failed to find metric " + metric + " in " + str(df.columns)
    # Drop rows where fastdup could not compute the metric (sentinel values).
    if metric in ['unique', 'width', 'height', 'size']:
        df = df[df[metric] > DEFUALT_METRIC_ZERO]
    elif metric in ['blur', 'mean', 'min', 'max', 'stdv']:
        df = df[df[metric] != DEFAULT_METRIC_MINUS_ONE]
    if slice is not None:
        # Bug fix: kwargs was previously passed positionally and landed in
        # find_label's vqa_prompt parameter; pass by keyword so label options
        # (e.g. debug_labels) are honored.
        subdf = find_label(get_label_func, df, 'filename', 'label', kwargs=kwargs)
        subdf = slice_df(subdf, slice, 'label', kwargs)
        subdf = subdf.sort_values(metric, ascending=not descending).head(num_images)
    else:
        if 'external_df' not in kwargs:
            subdf = df.sort_values(metric, ascending=not descending).head(num_images)
        else:
            # Caller supplied a pre-ordered dataframe; keep its order.
            subdf = df.head(num_images)
    assert len(subdf), "Encountered an empty stats data frame"
    # Bug fix: same positional-kwargs issue as above.
    subdf = find_label(get_label_func, subdf, 'filename', 'label', kwargs=kwargs)
    save_dir = calc_save_dir(save_path)
    stat_info = ""
    filename = "N/A"
    for i, row in tqdm(subdf.iterrows(), total=min(num_images, len(subdf)), desc="Generating gallery"):
        try:
            assert row['filename'] is not None, f"Failed with empty filename {subdf.head(2)}"
            filename = lookup_filename(row['filename'], work_dir)
            img = fastdup_imread(filename, None, None)
            assert img is not None, "Failed to read image " + filename + " orig filename " + row['filename']
            img = plot_bounding_box(img, get_bounding_box_func, filename)
            img = my_resize(img, max_width)
            imgpath = calc_image_path(lazy_load, save_dir, filename)
            fastdup_imwrite(imgpath, img)
        except Exception as ex:
            # A single broken image should not abort the whole gallery.
            fastdup_capture_exception("do_create_stats_gallery", ex)
            traceback.print_exc()
            print("Failed to generate viz for images", filename, ex)
            imgpath = None
        img_paths.append(imgpath)
    import fastdup.html_writer
    img_html = format_image_html_string(img_paths, lazy_load, max_width, save_dir)
    subdf.insert(0, 'Image', img_html)
    cols = [metric,'Image','filename']
    if callable(get_extra_col_func):
        subdf['extra'] = subdf['filename'].apply(lambda x: get_extra_col_func(x))
        cols.append('extra')
    if callable(get_reformat_filename_func):
        subdf['filename'] = subdf['filename'].apply(lambda x: get_reformat_filename_func(x))
    out_file = os.path.join(save_path, metric + '.html') if not str(save_path).endswith(".html") else save_path
    # Pick a friendlier title for the common metric/direction combinations.
    title = metric + ' Image Report'
    if metric == "mean" and descending:
        title = "Bright Image Report"
    elif metric == "mean":
        title = "Dark Image Report"
    elif metric == "size" and descending:
        title = "Largest Image Report"
    elif metric == "size":
        title = "Smallest Image Report"
    elif metric == "blur" and not descending:
        title = "Blurry Image Report"
    elif metric == "blur":
        title = "Sharpest Image Report"
    subtitle = "Showing example images, sort by "
    subtitle += "descending" if descending else "ascending"
    subtitle += " order"
    if slice is not None:
        subtitle += ", " + str(slice)
    if metric == 'size':
        cols.append('width')
        cols.append('height')
    if 'label' in subdf.columns:
        cols.append('label')
    subdf['info'] = swap_dataframe(subdf, cols)
    fastdup.html_writer.write_to_html_file(subdf[['Image','info']], title, out_file, stat_info, subtitle,
                                           jupyter_html=kwargs.get('jupyter_html', False))
    assert os.path.exists(out_file), "Failed to generate out file " + out_file
    print_success_msg(metric, out_file, lazy_load)
    clean_images(lazy_load, img_paths, "create_stats_gallery")
    return 0
max_width (int): Optional param to limit the image width descending (bool): Optional param to control the order of the metric get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image. The input is an absolute path to the image and the output is a list of bounding boxes. Each bounding box should be 4 integers: x1, y1, x2, y2. Example of valid bounding box list: [[0, 0, 100, 100]] Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename. Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists get_reformat_filename_func (callable): Optional parameter to allow reformatting the image file name. This is a function the user implements that gets the full file path and returns a new file name. get_extra_col_func (callable): Optional parameter to allow adding extra column to the report. input_dir (str): Optional parameter to specify the input directory of webdataset tar files, in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1' |
6,502 | import os
import pandas as pd
import cv2
import time
import numpy as np
import traceback
import shutil
import pathlib
from fastdup.image import plot_bounding_box, my_resize, get_type, imageformatter, create_triplet_img, fastdup_imread, calc_image_path, clean_images, pad_image, enhance_image, fastdup_imwrite
from fastdup.definitions import *
import re
from multiprocessing import Pool
from fastdup.sentry import *
from fastdup.utils import load_filenames, merge_with_filenames, get_bounding_box_func_helper, load_stats, load_labels, sample_from_components, calc_save_dir, convert_v1_to_v02
# tqdm is an optional dependency: fall back to a pass-through iterator
# (same signature, ignores total/desc) when it is not installed.
try:
    from tqdm.auto import tqdm
except:
    tqdm = (lambda x, total=None, desc=None: x)
def print_success_msg(report_name, out_file, lazy_load):
    """Print a confirmation message after a gallery/report html has been written.

    Parameters:
        report_name (str): human readable name of the generated report.
        out_file (str): path of the html file that was written.
        lazy_load (bool): when True the html references images on disk, so
            warn the user to ship the images/assets subfolders with the html.
    """
    print(f"Stored {report_name} visual view in ", out_file)
    if lazy_load:
        # Fix: corrected the "subolders" typo in the user-facing note.
        print("Note: when using lazy_load=True, the images are relative to the location of the html file. When sharing the report please make"
              " sure to include also subfolders images & assets.")
def format_image_html_string(img_paths, lazy_load, max_width, save_path=None):
    """Build the html <img> snippets for a list of image paths.

    When lazy_load is False every image is inlined (base64) via imageformatter;
    otherwise an <img> tag pointing at the (shortened) on-disk path is emitted
    so the browser loads it lazily.
    """
    if lazy_load:
        tags = []
        for path in img_paths:
            tags.append("<img src=\"" + shorten_image(path, save_path) + "\" loading=\"lazy\">")
        return tags
    return [imageformatter(path, max_width) for path in img_paths]
def swap_dataframe(subdf, cols):
    """Transpose each row of *subdf* into its own single-column DataFrame.

    Columns named 'image'/'similar' (case-insensitive) or starting with
    'info' are dropped first, since those hold html payloads rather than
    tabular metadata.
    """
    def _is_meta(col):
        lowered = col.lower()
        return lowered != 'image' and lowered != 'similar' and not col.startswith('info')

    kept_cols = [col for col in cols if _is_meta(col)]
    return [pd.DataFrame(row) for _, row in subdf[kept_cols].iterrows()]
def find_label(get_label_func, df, in_col, out_col, vqa_prompt: str = None, kwargs=None):
if (get_label_func is not None):
if isinstance(get_label_func, str):
if os.path.exists(get_label_func):
df_labels = load_labels(get_label_func, kwargs)
assert len(df_labels) == len(df), f"Error: wrong length of labels file {get_label_func} expected {len(df)} got {len(df_labels)}"
df[out_col] = df_labels['label']
elif get_label_func in df.columns:
df[out_col] = df['label']
elif get_label_func in CAPTION_MODEL_NAMES:
from fastdup.captions import generate_labels
df[out_col] = generate_labels(df[in_col], get_label_func, device='cpu')
elif get_label_func == VQA_MODEL1_NAME:
from fastdup.captions import generate_vqa_labels
df[out_col] = generate_vqa_labels(df[in_col], vqa_prompt, kwargs)
elif get_label_func == AGE_LABEL1_NAME:
from fastdup.captions import generate_age_labels
df[out_col] = generate_age_labels(df[in_col], kwargs)
else:
assert False, f"Found str label {get_label_func} but it is neither a file nor a column name in the dataframe {df.columns}"
elif isinstance(get_label_func, dict):
df[out_col] = df[in_col].apply(lambda x: get_label_func.get(x, MISSING_LABEL))
elif callable(get_label_func):
assert len(df), "Empty dataframe"
assert in_col in df.columns, f"Missing column {in_col}"
df[out_col] = df[in_col].apply(lambda x: get_label_func(x))
else:
assert False, f"Failed to understand get_label_func type {type(get_label_func)}"
if kwargs is not None and 'debug_labels' in kwargs:
print(df.head())
return df
def slice_df(df, slice, colname, kwargs=None):
    """Return the subset of df whose colname matches *slice*.

    slice may be None (no filtering), a single label string, or a list of
    labels.  String matching falls back to substring matching when no exact
    match exists; list-valued label columns are intersected with the slice.
    kwargs may enable 'split_sentence_to_label_list', 'debug_labels' and
    'grouped' behaviors.
    """
    assert len(df), "Df has no rows"
    opts = kwargs or {}
    split_sentence_to_label_list = bool(opts.get('split_sentence_to_label_list'))
    debug_labels = bool(opts.get('debug_labels'))
    grouped = bool(opts.get('grouped'))
    if slice is None:
        return df
    if isinstance(slice, str):
        # Labels may be plain strings or lists of strings; inspect them to
        # decide which matching strategy applies.
        if split_sentence_to_label_list:
            labels = df[colname].astype(str).apply(lambda x: split_str(x.lower())).values
            if debug_labels:
                print('Label with split sentence', labels[:10])
        else:
            labels = df[colname].astype(str).values
            if debug_labels:
                print('label without split sentence', labels[:10])
        is_list = isinstance(labels[0], list)
        if grouped:
            df = df[df[colname].apply(lambda x: slice in x)]
            assert len(df), f"Failed to find any labels with value={slice}"
        elif is_list:
            labels = [item for sublist in labels for item in sublist]
            if debug_labels:
                print('labels after merging sublists', labels[:10])
            df = df[df[colname].apply(lambda x: slice in [y.lower() for y in x])]
        else:
            # Exact match first, substring match as a fallback.
            matches = df[df[colname] == slice]
            if len(matches) == 0:
                matches = df[df[colname].apply(lambda x: slice in str(x))]
            df = matches
    elif isinstance(slice, list):
        if isinstance(df[colname].values[0], list):
            df = df[df[colname].apply(lambda x: len(set(x) & set(slice)) > 0)]
        else:
            df = df[df[colname].isin(slice)]
        assert len(df), f"Failed to find any labels with {slice}"
    else:
        assert False, "slice must be a string or a list of strings"
    return df
def lookup_filename(filename, work_dir):
    """Resolve *filename* to an on-disk path.

    If the file exists as given it is returned untouched.  Paths that fastdup
    recorded relative to its s3 temp folders are re-rooted under *work_dir*.

    Parameters:
        filename (str): path as recorded in a fastdup output csv.
        work_dir (str): fastdup work dir used to resolve relative temp paths.

    Returns:
        str: the resolved filename (may still not exist for remote images).
    """
    # Fix: the assert messages previously printed the literal "(unknown)"
    # instead of interpolating the offending value.
    assert isinstance(filename, str), f"Wrong filename type {type(filename)}, expected str: {filename}"
    if os.path.exists(filename):
        return filename

    if filename.startswith(S3_TEMP_FOLDER + get_sep()) or filename.startswith(S3_TEST_TEMP_FOLDER + get_sep()):
        assert work_dir is not None, f"Failed to find work_dir on remote_fs: filename was {filename}"
        filename = os.path.join(work_dir, filename)
    return filename
# --- module init side effects (executed at import time) ---
import os
# Run Qt-backed components headless (no display server needed).
os.environ["QT_QPA_PLATFORM"] ="offscreen"
# Expose the package-local directory via env — presumably consumed by the
# native fastdup library; TODO confirm against the c++ side.
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
# Sanity-check the native artifacts shipped with the package; bail out hard
# when the install is broken since nothing below can work without them.
if not os.path.exists(so_file):
    print("Failed to find shared object", so_file);
    print("Current init file is on", __file__);
    sys.exit(1)
if not os.path.exists(model_path_full):
    # Report the broken install to telemetry before exiting.
    fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
    print("Failed to find ort model on init " + __file__)
    sys.exit(1)
def generate_sprite_image(img_list, sample_size, log_dir, get_label_func=None, h=0, w=0, alternative_filename=None, alternative_width = None, max_width=None, **kwargs):
    '''
    Build a sprite image (a large grid of thumbnails) for the tensorboard projector.

    Parameters:
        img_list (list): image filenames (full path).
        sample_size (int): how many images to plot.
        log_dir (str): directory to save the sprite image (created if missing,
            unless alternative_filename overrides the destination).
        get_label_func (callable): optional label source per image; may also be
            a dict of filename -> label or a labels csv filename.
        h (int): optional requested height of each subimage.
        w (int): optional requested width of each subimage.
        alternative_filename (str): optional explicit output path.
        alternative_width (int): optional number of images per row.
        max_width (int): optional cap on the resulting sprite width.
        force_width (int): optional width for the number of images tiled.
        force_height (int): optional height for the number of images tiled.

    Returns:
        path (str): path to sprite image
        labels (list): list of labels
        (None is returned when an exception was captured.)
    '''
    try:
        assert len(img_list), "Image list is empty"
        assert sample_size > 0
        # Only the default destination needs log_dir to exist.
        if alternative_filename is None and not os.path.exists(log_dir):
            os.makedirs(log_dir, exist_ok=True)
        from fastdup.tensorboard_projector import generate_sprite_image as tgenerate_sprite_image
        return tgenerate_sprite_image(img_list, sample_size, log_dir, get_label_func, h=h, w=w,
                                      alternative_filename=alternative_filename,
                                      alternative_width=alternative_width, max_width=max_width,
                                      kwargs=kwargs)
    except Exception as ex:
        fastdup_capture_exception("generate_sprite_image", ex)
def enhance_image(image):
    """Return a contrast-enhanced single-channel copy of *image*.

    The BGR input is converted to grayscale and histogram-equalized, which
    spreads the intensity distribution over the full range.
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return cv2.equalizeHist(grayscale)
def calc_image_path(lazy_load, save_path, filename, filename_suffix=''):
    """Compute where a report image should be written under save_path.

    Parameters:
        lazy_load (bool): when True images go into an "images" subfolder that
            the html references; otherwise they are written next to the html.
        save_path (str): report output folder.
        filename (str): original image filename (sanitized via safe_replace).
        filename_suffix (str): optional suffix inserted before the extension.

    Returns:
        str: full destination path for the image.
    """
    import uuid
    if lazy_load:
        os.makedirs(os.path.join(save_path, "images"), exist_ok=True)
        folder = os.path.join(save_path, "images")
    else:
        folder = os.path.join(save_path)
    filename = safe_replace(filename)
    imgpath = os.path.join(folder, filename)
    p, ext = os.path.splitext(imgpath)
    if ext is not None and ext != '' and ext.lower() not in SUPPORTED_IMG_FORMATS:
        # Keep the original (unsupported) extension but append .jpg so the
        # file is treated as an image downstream.
        ext += ".jpg"
    imgpath = p + filename_suffix + ext
    if len(imgpath) > 255:
        # Path too long for most filesystems: fall back to a random name.
        # Fix: use the public uuid API instead of the private (and unimported
        # here) tempfile._get_candidate_names() generator.
        imgpath = os.path.join(folder, uuid.uuid4().hex + filename_suffix + ext)
    return imgpath
def pad_image(image, target_width, target_height):
    """Center-pad *image* with black borders up to target_width x target_height.

    Images already larger than the target in a dimension are left unpadded in
    that dimension (padding amounts are clamped at zero).
    """
    height, width = image.shape[:2]
    extra_w = target_width - width
    extra_h = target_height - height
    # The odd leftover pixel (if any) goes to the right/bottom side.
    left = max(extra_w // 2, 0)
    right = max(extra_w // 2, 0) + extra_w % 2
    top = max(extra_h // 2, 0)
    bottom = max(extra_h // 2, 0) + extra_h % 2
    return cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=0)
def clean_images(lazy_load, img_paths, section):
    """Delete temporary per-report image files unless lazy_load keeps them.

    When lazy_load is True the html references the files on disk, so they
    must be kept.  Deletion failures are printed and reported to telemetry
    but never raised.
    """
    if lazy_load:
        return
    for path in img_paths:
        try:
            if path is not None and os.path.exists(path):
                os.unlink(path)
        except Exception as ex:
            print("Failed to delete image file ", path, ex)
            fastdup_capture_exception(section, ex)
def fastdup_imread(img1_path, input_dir, kwargs):
    """
    Read an image from local file, or from a tar file, or from s3/minio path using minio client mc
    Parameters:
        img1_path (str): path to the image
        input_dir (str): optional directory path in case the image is found on a webdataset in another path or found in s3
        kwargs (dict): optional flags; 'reformat_tar_name' (callable) may rewrite the tar path before extraction
    Returns:
        img1 (np.array): the image, or None when every lookup strategy failed
    """
    assert not pd.isnull(img1_path), f"img1_path should not be None {img1_path} {input_dir}, {kwargs}"
    is_minio_or_s3 = False
    # Normalize/validate input_dir: expand '~', then decide whether it points
    # at a remote (s3/minio) bucket or must exist locally.
    if input_dir is not None and (isinstance(input_dir, str) or isinstance(input_dir, pathlib.Path)):
        if input_dir.startswith('~/'):
            input_dir = os.path.expanduser(input_dir)
        if not input_dir.startswith("s3://") and not input_dir.startswith("minio://"):
            assert os.path.exists(input_dir), "Failed to find input_dir: " + input_dir
        else:
            is_minio_or_s3 = True
    if img1_path.startswith('~/'):
        img1_path = os.path.expanduser(img1_path)
    # Strategy 1: plain local file.
    if os.path.exists(img1_path):
        img = inner_read(img1_path)
        return img
    # Strategy 2: image stored inside a webdataset tar (path carries the
    # fastdup temp-folder marker plus a '.tar/' component).
    elif ('/' +S3_TEMP_FOLDER + '/' in img1_path or '/' + S3_TEST_TEMP_FOLDER + '/' in img1_path) and \
        '.tar/' in img1_path:
        assert os.path.exists(input_dir), "Failed to find input dir " + input_dir
        # Recover the tar location by stripping the (separator-less) input_dir
        # prefix from the recorded directory name, then re-rooting it.
        pos = os.path.dirname(img1_path).find(input_dir.replace('/',''))
        tar_file = os.path.dirname(img1_path)[pos+len(input_dir.replace('/','')):]
        tar_file = os.path.join(input_dir, tar_file)
        if kwargs is not None and "reformat_tar_name" in kwargs and callable(kwargs['reformat_tar_name']):
            tar_file = kwargs["reformat_tar_name"](tar_file)
        print('Found tar file', tar_file)
        img_name = os.path.basename(img1_path)
        try:
            with tarfile.open(tar_file, "r") as tar:
                f = tar.extractfile(img_name)
                return cv2.imdecode(np.frombuffer(f.read(), np.uint8), cv2.IMREAD_COLOR)
        except Exception as ex:
            # Best effort: report and return None instead of raising.
            fastdup_capture_exception("fastdup_imread", ex)
            print("Error reading from tar file: ", tar_file, ex)
            return None
    # Strategy 3: download from minio/s3 into the local temp folder, then read.
    elif is_minio_or_s3 and input_dir is not None:
        if input_dir.startswith("minio://"):
            local_dir_no_temp = truncate_folder_name(os.path.dirname(img1_path))
            minio_prefix = "/".join(input_dir.replace("minio://", "").split('/')[:2])
            #print('minio_prefix', minio_prefix)
            download_minio(minio_prefix + '/' + local_dir_no_temp + '/' + os.path.basename(img1_path), S3_TEMP_FOLDER)
            ret = inner_read(os.path.join(S3_TEMP_FOLDER, os.path.basename(img1_path)))
            assert ret is not None, f"Failed to read image {os.path.join(S3_TEMP_FOLDER, os.path.basename(img1_path))}"
            return ret
        elif input_dir.startswith("s3://"):
            local_dir_no_temp = truncate_folder_name(os.path.dirname(img1_path))
            s3_prefix = 's3://' + "/".join(input_dir.replace("s3://", "").split('/')[:1])
            #print('s3_prefix', s3_prefix)
            download_s3(s3_prefix + '/' + local_dir_no_temp + '/' + os.path.basename(img1_path), S3_TEMP_FOLDER)
            ret = inner_read(os.path.join(S3_TEMP_FOLDER, os.path.basename(img1_path)))
            return ret
    # Strategy 4: the recorded path contains input_dir twice (see the example
    # below); try the suffix obtained by stripping one prefix occurrence.
    #Failed to read image1 ..\milvus_vector_db\data\images\..\milvus_vector_db\data\images\Egyptian_Mau_210.jpg
    elif input_dir is not None and img1_path.startswith(input_dir) and len(img1_path) >= len(input_dir) +2:
        suffix = img1_path[len(input_dir):]
        if input_dir in suffix and os.path.exists(suffix):
            img = inner_read(suffix)
            return img
    # Strategy 5: collapse doubled single quotes produced by the c side.
    elif "''" in img1_path: # try to handle french and other languages where c side doubles the '' otherwise pandas can't read it
        new_img1_path = img1_path.replace("''","")
        if os.path.exists(new_img1_path):
            img = inner_read(new_img1_path)
            return img
    print('Failed to read image from img_path', img1_path)
    return None
def fastdup_imwrite(local_file, im):
    """Save image *im* to *local_file*, working around two cv2 limitations.

    cv2.imwrite derives the codec from the extension, so extension-less
    targets are written with a temporary '.jpg' suffix and renamed back.
    On very long paths (>= 254 chars) cv2.imwrite can fail; in that case the
    image is written under a short random name and moved into place.

    Raises (via assert) when the image could not be saved at all.
    """
    has_extension = check_valid_image_extension(local_file)
    if has_extension:
        ret = cv2.imwrite(local_file, im)
    else:
        # Write with a recognized extension, then rename to the target name.
        local_file_wext = local_file + '.jpg'
        ret = cv2.imwrite(local_file_wext, im)
        assert ret, f"Failed to save img to {local_file} most likely filename is too long for the OS"
        os.rename(local_file_wext, local_file)
    if ret == False and len(local_file) >= 254:
        # Fix: this fallback was previously unreachable because an assert on
        # the file's existence ran before it; retry via a short temp name.
        try:
            import uuid
            import shutil
            file, ext = os.path.splitext(local_file)
            tmp_filename = str(uuid.uuid4()) + ext
            ret = cv2.imwrite(tmp_filename, im)
            if os.path.exists(local_file):
                os.unlink(local_file)
            shutil.move(tmp_filename, local_file)
        finally:
            assert ret, f"Failed to save img to {local_file} most likely filename is too long for the OS"
    elif ret == False:
        assert ret, f"Failed to save img to {local_file}"
    assert os.path.isfile(local_file), "Failed to save img to " + local_file
def my_resize(img, max_width):
    """Downscale *img* so its target dimension fits 320px (or max_width).

    Returns None for a None input; images already small enough are returned
    unchanged.
    """
    if img is None:
        return None
    h, w, c = get_shape(img)
    target = 320
    if max_width is not None and w > max_width:
        target = max_width
    aspect = h / w
    too_big = h > target or w > target
    if too_big and aspect > 0 and int(target / aspect) > 0 and target > 0:
        img = cv2.resize(img, (int(target / aspect), target))
    return img
def plot_bounding_box(img, get_bounding_box_func, filename):
    """Draw green bounding boxes for *filename* on top of *img*.

    get_bounding_box_func is either a callable(filename) -> list of boxes or
    a dict filename -> list of boxes; each box is [x1, y1, x2, y2].  Any
    other source (including None) draws nothing.
    """
    if callable(get_bounding_box_func):
        boxes = get_bounding_box_func(filename)
    elif isinstance(get_bounding_box_func, dict):
        boxes = get_bounding_box_func.get(filename, [])
    else:
        boxes = []
    for box in boxes:
        coords = [int(value) for value in box]
        img = cv2.rectangle(img, (coords[0], coords[1]), (coords[2], coords[3]), (0, 255, 0), 3)
    return img
def convert_v1_to_v02(df):
    """Normalize fastdup v1.x column names to the v0.2 from/to/label schema.

    v1 similarity/outlier files carry filename_from/filename_to (plus integer
    from/to ids) or filename_outlier/filename_nearest; label columns follow
    the same pattern.  The integer id columns are dropped in favor of the
    filename columns.  Note: drops/renames mutate or replace *df*.
    """
    if 'filename_from' in df.columns and 'filename_to' in df.columns:
        # Keep the filename columns, drop the integer id columns in place.
        del df['from']
        del df['to']
    renames = {}
    if 'filename_from' in df.columns and 'filename_to' in df.columns:
        renames.update({'filename_from': 'from', 'filename_to': 'to'})
    if 'filename_outlier' in df.columns and 'filename_nearest' in df.columns:
        renames.update({'filename_outlier': 'from', 'filename_nearest': 'to'})
    if 'label_from' in df.columns and 'label_to' in df.columns:
        renames.update({'label_from': 'label', 'label_to': 'label2'})
    if 'label_outlier' in df.columns:
        renames['label_outlier'] = 'label'
    return df.rename(columns=renames) if renames else df
def calc_save_dir(save_path):
    """Return the directory a report should be saved in.

    save_path may be either a folder (returned as-is) or an html filename,
    in which case its parent directory is returned ('.' for a bare name).
    """
    if not save_path.endswith(".html"):
        return save_path
    parent = os.path.dirname(save_path)
    return parent if parent else "."
def load_filenames(work_dir, kwargs):
    """Load the fastdup image (or crop) listing produced in work_dir.

    work_dir may also be a direct csv path.  With 'load_crops'/'draw_bbox'
    in kwargs the crop listing is read instead; with load_crops alone the
    'filename' column is overwritten by 'crop_filename'.  'nrows' in kwargs
    (via find_nrows) limits how many rows are read.
    """
    assert work_dir is not None and isinstance(work_dir, str) and os.path.exists(work_dir), \
        f"Need to specify work_dir to point to the location of fastdup work_dir, got {work_dir}"
    load_crops = bool(kwargs.get('load_crops', False))
    draw_bbox = bool(kwargs.get('draw_bbox', False))
    if work_dir.endswith('.csv'):
        listing = work_dir
    elif load_crops or draw_bbox:
        listing = os.path.join(work_dir, "atrain_" + FILENAME_CROP_LIST)
    else:
        listing = os.path.join(work_dir, "atrain_" + FILENAME_IMAGE_LIST)
    assert os.path.isfile(listing), "Failed to find fastdup input file " + listing
    nrows = find_nrows(kwargs)
    import pandas as pd
    filenames = pd.read_csv(listing, nrows=nrows)
    assert len(filenames), "Empty dataframe found " + listing
    assert 'filename' in filenames.columns, f"Error: Failed to find filename column in {work_dir}/atrain_{FILENAME_IMAGE_LIST}"
    if load_crops and not draw_bbox:
        assert 'crop_filename' in filenames.columns, f"Failed to load crop filename {listing}"
        filenames["filename"] = filenames["crop_filename"]
    return filenames
def merge_with_filenames(df, filenames):
    """Replace the integer from/to ids in df with actual filenames.

    Joins *filenames* (columns: index, filename) twice — once per side —
    then drops the id/index columns and renames the joined filename columns
    back to 'from'/'to'.
    """
    merged = (df.merge(filenames, left_on='from', right_on='index')
                .merge(filenames, left_on='to', right_on='index'))
    assert merged is not None and len(merged), f"Failed to merge similarity/outliers with atrain_features.dat.csv file, \n{df.head()}, \n{filenames.head()}"
    merged = merged.drop(columns=['from', 'to', 'index_x', 'index_y'])
    return merged.rename(columns={'filename_x': 'from', 'filename_y': 'to'})
def get_bounding_box_func_helper(get_bounding_box_func):
    """Normalize the user-supplied bounding-box source.

    Accepts a callable or dict (returned unchanged), a csv file, a work_dir
    containing atrain_crops.csv, or a DataFrame with filename/col_x/row_y/
    width/height columns.  Tabular inputs are converted into a dict of
    filename -> list of [x1, y1, x2, y2] boxes.  None passes through.
    """
    if get_bounding_box_func is None:
        return None
    import pandas as pd
    if callable(get_bounding_box_func) or isinstance(get_bounding_box_func, dict):
        return get_bounding_box_func
    if isinstance(get_bounding_box_func, str):
        if os.path.isfile(get_bounding_box_func):
            df = pd.read_csv(get_bounding_box_func)
        elif os.path.isdir(get_bounding_box_func):
            local_file = os.path.join(get_bounding_box_func, "atrain_crops.csv")
            assert os.path.exists(local_file), "Failed to find bounding box file in " + local_file
            df = pd.read_csv(local_file)
        else:
            assert False, "Failed to find input file/folder " + get_bounding_box_func
    elif isinstance(get_bounding_box_func, pd.DataFrame):
        df = get_bounding_box_func
    else:
        assert False, "get_bounding_box_func should be a callable function, a dictionary, a file with bounding box info or a dataframe"
    assert len(df), "Empty dataframe with bounding box information"
    for required in ("filename", "row_y", "col_x", "width", "height"):
        assert required in df.columns
    # Convert x/y/w/h into [x1, y1, x2, y2] and group per filename.
    df["bbox"] = df.apply(lambda r: [r["col_x"], r["row_y"], r["col_x"] + r["width"], r["row_y"] + r["height"]], axis=1)
    per_file = df.groupby('filename')['bbox'].apply(list).reset_index()
    return per_file.set_index('filename')['bbox'].to_dict()
def generate_sprite_image(img_path, sample_size, log_dir, get_label_func = None, h = 0, w = 0, alternative_filename = None, alternative_width=None, max_width=None, kwargs={}):
    """Tile up to sample_size images from img_path into a single sprite image.

    Parameters:
        img_path (list): image filenames (str) or already-loaded np arrays.
        sample_size (int): maximum number of tiles.
        log_dir (str): output directory for the default 'sprite.png'.
        get_label_func (callable or list): per-image label source; anything
            else yields the label "N/A".
        h, w (int): per-tile height/width; 0 means use IMAGE_SIZE.
        alternative_filename (str): explicit output path override.
        alternative_width (int): explicit number of tiles per row.
        max_width (int): cap on the final sprite width (sprite is rescaled).
        kwargs (dict): may carry integer 'force_width'/'force_height' to pin
            the grid dimensions exactly.

    Returns:
        (path, labels) when inputs are filenames, (np.array, labels) otherwise.
    """
    images_pil = []
    labels = []
    # Per-tile dimensions default to IMAGE_SIZE unless explicitly given.
    H = IMAGE_SIZE if h == 0 else h
    W = IMAGE_SIZE if w == 0 else w
    # Downscale the tile size (preserving aspect ratio) to max_width, or to
    # a 320px default tile width when explicit h/w were not both given.
    if max_width is not None and h != 0 and w != 0:
        if W > max_width:
            scale = 1.0*W/max_width
            H = int(1.0*H/scale)
            W = int(1.0*W/scale)
    else:
        if W > 320:
            scale = 1.0*W/320
            H = int(1.0*H/scale)
            W = int(1.0*W/scale)
    if alternative_width is not None:
        NUM_IMAGES_WIDTH = alternative_width
        if (alternative_width < sample_size):
            sample_size = alternative_width
        height = 1
    elif kwargs and 'force_width' in kwargs and 'force_height' in kwargs:
        assert isinstance(kwargs['force_width'], int), "force_width must be an integer"
        assert isinstance(kwargs['force_height'], int), "force_height must be an integer"
        if kwargs['force_width'] * kwargs['force_height'] > len(img_path):
            print(f"Warning: missing images for a full grid, requested {kwargs['force_width'] * kwargs['force_height']} got {len(img_path)}")
        NUM_IMAGES_WIDTH = kwargs['force_width']
        # Bug fix: grid height must come from force_height; it previously
        # reused force_width, breaking non-square forced grids.
        height = kwargs['force_height']
    else:
        # Default layout: a roughly square grid, slightly wider than tall.
        NUM_IMAGES_WIDTH = int(1.4*np.ceil(np.sqrt(min(sample_size, len(img_path)))))
        divs = int(np.ceil(min(sample_size,len(img_path)) / NUM_IMAGES_WIDTH))
        height = min(divs, NUM_IMAGES_WIDTH)
    for i, im in enumerate(img_path[:sample_size]):
        # Load from disk (skipping unreadable files) or resize in-memory arrays.
        if isinstance(im, str):
            try:
                assert os.path.exists(im)
                img_pil = cv2.imread(im)
                assert img_pil is not None, f"Failed to read image from {im}"
                img_pil = cv2.cvtColor(img_pil, cv2.COLOR_BGR2RGB)
                img_pil = cv2.resize(img_pil, (W, H))
            except Exception as ex:
                print("Failed to load image" + im)
                continue
        else:
            img_pil = cv2.resize(im, (W, H))
            img_pil = cv2.cvtColor(img_pil, cv2.COLOR_BGR2RGB)
        images_pil.append(Image.fromarray(img_pil))
        # Resolve the tile's label.
        if callable(get_label_func):
            label = get_label_func(im)
        elif isinstance(get_label_func, list):
            label = get_label_func[i]
        else:
            label = "N/A"
        labels.append(label)
    # Paste every tile into a white canvas, row-major.
    spriteimage = Image.new(
        mode='RGB',
        size=(W*NUM_IMAGES_WIDTH, H*height),
        color=(255,255,255)
    )
    for count, image in enumerate(images_pil):
        h_loc = count // NUM_IMAGES_WIDTH
        w_loc = count % NUM_IMAGES_WIDTH
        spriteimage.paste(image, (w_loc*W, h_loc*H))
    if max_width is not None:
        factor = max_width / spriteimage.width
        spriteimage = spriteimage.resize((int(spriteimage.width * factor), int(spriteimage.height * factor)))
    if isinstance(img_path[0], str):
        if alternative_filename is not None:
            SPRITE_PATH = alternative_filename
        else:
            SPRITE_PATH = f'{log_dir}/sprite.png'
        spriteimage.convert('RGB').save(SPRITE_PATH)
        return SPRITE_PATH, labels
    else:
        return np.array(spriteimage.convert('RGB')), labels
def write_to_html_file(df, title='', filename='out.html', stats_info = None, subtitle=None, max_width=None,
                       write_row_name=True, jupyter_html=False):
    ''' Write an entire dataframe to an HTML file with nice formatting. '''
    # work_dir retained for the (currently disabled) css/assets pipeline.
    work_dir = os.path.dirname(filename)
    pieces = [write_html_header(title, subtitle, max_width, jupyter_html)]
    pieces.append(write_component_header())
    # One styled component per dataframe row.
    for row_index, row in df.iterrows():
        pieces.append(write_component(df.columns, row, row_index, max_width, write_row_name))
    pieces.append(write_components_footer())
    pieces.append(write_html_footer())
    with open(filename, 'w') as f:
        f.write("".join(pieces))
    assert os.path.exists(filename), "Failed to write file " + filename
The provided code snippet includes necessary dependencies for implementing the `do_create_similarity_gallery` function. Write a Python function `def do_create_similarity_gallery(similarity_file, save_path, num_images=20, lazy_load=False, get_label_func=None, slice=None, max_width=None, descending=False, get_bounding_box_func =None, get_reformat_filename_func=None, get_extra_col_func=None, input_dir=None, work_dir = None, min_items=2, max_items=None, **kwargs)` to solve the following problem:
Function to create and display a gallery of images computed by the outliers metrics Parameters: similarity_file (str): csv file with the computed image statistics by the fastdup tool, alternatively a pandas dataframe can be passed in directly. save_path (str): output folder location for the visuals num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory. lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse curser is above the image (reduced html file size). get_label_func (callable): optional function given an absolute path to an image return the image label. Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels. Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file. metric (str): Optional metric selection. One of blur, size, mean, min, max, width, height, unique. slice (str or list): Optional parameter to select a slice of the outliers file based on a specific label or a list of labels. A special value is 'label_score' which is used for comparing both images and labels of the nearest neighbors. max_width (int): Optional param to limit the image width descending (bool): Optional param to control the order of the metric get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image. The input is an absolute path to the image and the output is a list of bounding boxes. Each bounding box should be 4 integers: x1, y1, x2, y2. 
Example of valid bounding box list: [[0, 0, 100, 100]] Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename. Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists get_reformat_filename_func (callable): Optional parameter to allow reformatting the filename before displaying it in the report. This is a function the user implements that gets the full file path and returns a string with the reformatted filename. get_extra_col_func (callable): Optional parameter to allow adding more image information to the report like the image label. This is a function the user implements that gets the full file path and returns html string with the label or any other metadata desired. input_dir (str): Optional parameter to specify the input directory of webdataset tar files, in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1' work_dir (str): Optional parameter to fastdup work_dir. Needed when similarity_file is a pd.DataFrame. min_items (int): Minimal number of items in the similarity group (optional). max_items (int): Maximal number of items in the similarity group (optional). Returns: ret (pd.DataFrame): Dataframe with the image statistics
Here is the function:
def do_create_similarity_gallery(similarity_file, save_path, num_images=20, lazy_load=False, get_label_func=None,
slice=None, max_width=None, descending=False, get_bounding_box_func =None,
get_reformat_filename_func=None, get_extra_col_func=None, input_dir=None, work_dir = None, min_items=2,
max_items=None, **kwargs):
'''
Function to create and display a gallery of images computed by the outliers metrics
Parameters:
similarity_file (str): csv file with the computed image statistics by the fastdup tool, alternatively a pandas dataframe can be passed in directly.
save_path (str): output folder location for the visuals
num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory.
lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse curser is above the image (reduced html file size).
get_label_func (callable): optional function given an absolute path to an image return the image label.
Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels.
Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file.
metric (str): Optional metric selection. One of blur, size, mean, min, max, width, height, unique.
slice (str or list): Optional parameter to select a slice of the outliers file based on a specific label or a list of labels. A special value is 'label_score' which is used for comparing both images and labels of the nearest neighbors.
max_width (int): Optional param to limit the image width
descending (bool): Optional param to control the order of the metric
get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image.
The input is an absolute path to the image and the output is a list of bounding boxes.
Each bounding box should be 4 integers: x1, y1, x2, y2. Example of valid bounding box list: [[0, 0, 100, 100]]
Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename.
Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists
get_reformat_filename_func (callable): Optional parameter to allow reformatting the filename before displaying it in the report. This is a function the user implements that gets the full file path and returns a string with the reformatted filename.
get_extra_col_func (callable): Optional parameter to allow adding more image information to the report like the image label. This is a function the user implements that gets the full file path and returns html string with the label or any other metadata desired.
input_dir (str): Optional parameter to specify the input directory of webdataset tar files,
in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1'
work_dir (str): Optional parameter to fastdup work_dir. Needed when similarity_file is a pd.DataFrame.
min_items (int): Minimal number of items in the similarity group (optional).
max_items (int): Maximal number of items in the similarity group (optional).
Returns:
ret (pd.DataFrame): Dataframe with the image statistics
'''
from fastdup import generate_sprite_image
img_paths2 = []
from_paths = []
info0 = []
info = []
label_score = []
lengths = []
debug_sim = False
#v1 = 'id_to_filename_func' in kwargs
df = similarity_file
if debug_sim:
print("sim df", df.head())
get_bounding_box_func = get_bounding_box_func_helper(get_bounding_box_func)
reformat_disp_path = kwargs.get('get_display_filename_func', lambda x: x)
load_crops = kwargs.get('load_crops', False)
save_dir = calc_save_dir(save_path)
subdir = os.path.join(save_dir, "images")
if not os.path.exists(subdir):
os.mkdir(subdir)
if 'from_filename' not in df.columns and 'to_filename' not in df.columns:
if load_crops:
assert "filename" not in df.columns
filenames = load_filenames(work_dir, kwargs)
assert filenames is not None and not filenames.empty, f"Failed to read crop files from {work_dir}"
assert "index" in filenames.columns and "filename" in filenames.columns
df = merge_with_filenames(df, filenames[["index","filename"]])
if debug_sim:
print("after merge", df.head())
else:
df = similarity_file
if df['from'].dtype in [int, np.int64]:
assert df['to'].dtype in [int, np.int64], "Wrong types, expect both str or both int"
filenames = load_filenames(work_dir, kwargs)
filenames = filenames[["index", "filename"]]
df = merge_with_filenames(df, filenames)
if debug_sim:
print("after merge", df.head())
else:
df = convert_v1_to_v02(df)
if get_label_func is not None and ('label' not in df.columns or 'label2' not in df.columns):
df = find_label(get_label_func, df, 'from', 'label', kwargs)
df = find_label(get_label_func, df, 'to', 'label2', kwargs)
if slice != 'label_score':
df = slice_df(df, slice, 'label')
if df is None:
return 1
else:
print("Warning: you are running create_similarity_gallery() without providing get_label_func so similarities are not computed between different classes. "
"It is recommended to run this report with labels. Without labels this report output is similar to create_duplicate_gallery()")
df = df.sort_values(['from','distance'], ascending= not descending)
if 'label' in df.columns and 'label2' in df.columns:
top_labels_to = df.groupby('from')['label2'].apply(list)
top_labels_from = df.groupby('from')['label'].apply(list)
tos = df.groupby('from')['to'].apply(list)
distances = df.groupby('from')['distance'].apply(list)
assert len(tos), "Empty list"
if 'label' in df.columns:
subdf = pd.DataFrame({'to':tos, 'label':top_labels_from, 'label2':top_labels_to, 'distance':distances}).reset_index()
else:
subdf = pd.DataFrame({'to':tos, 'distance':distances}).reset_index()
if debug_sim:
print("subdf", subdf.head())
info_df = None
if slice is None or slice != 'label_score':
subdf = subdf.sort_values(['distance'], ascending=not descending)
assert len(subdf), "Empty dataframe"
df2 = subdf.copy()
subdf = subdf.head(num_images)
assert len(subdf), "Empty dataframe"
stat_info = None
else:
assert len(subdf), "Empty dataframe"
for i, row in tqdm(subdf.iterrows(), total=len(subdf), desc="Generating gallery"):
filename = str(row["from"])
filename = lookup_filename(filename, work_dir)
from_label = row['label'][0]
to_label = row['label2']
similar = [x==from_label for x in list(to_label)]
similar = 100.0*sum(similar)/(1.0*len(to_label))
lengths.append(len(row['label']))
label_score.append(similar)
subdf['score'] = label_score
subdf['length'] = lengths
subdf = subdf[subdf['length'] >= min_items]
if max_items is not None:
subdf = subdf[subdf['length'] <= max_items]
subdf = subdf.sort_values(['score','length'], ascending=not descending)
df2 = subdf.copy()
subdf = subdf.head(num_images)
for i, row in tqdm(subdf.iterrows(), total=min(num_images, len(subdf)), desc="Generating gallery"):
info_df = None
info0_df = None
try:
label = None
filename = row["from"]
filename = lookup_filename(filename, work_dir)
if 'label' in row:
label = row['label']
if isinstance(label, list):
label = label[0]
disp_filename = reformat_disp_path(filename)
if callable(get_reformat_filename_func):
new_filename = get_reformat_filename_func(filename)
else:
new_filename = disp_filename
if label is not None:
info0_df = pd.DataFrame({'label':[label],'from':[new_filename]}).T
else:
info0_df = pd.DataFrame({'from':[new_filename]}).T
img = fastdup_imread(filename, input_dir=input_dir, kwargs=kwargs)
assert img is not None, f"Failed to read image {str(filename)} {input_dir}"
img = plot_bounding_box(img, get_bounding_box_func, filename)
img = my_resize(img, max_width)
if 'enhance_image' in kwargs and kwargs['enhance_image']:
img = enhance_image(img)
image_suffix = ''
imgpath = calc_image_path(lazy_load, subdir, filename, filename_suffix=image_suffix)
fastdup_imwrite(imgpath, img)
MAX_IMAGES = 10
to_impaths_ = row["to"][:MAX_IMAGES]
assert len(to_impaths_), "Empty image path list"
#else:
imgs = [plot_bounding_box(fastdup_imread(im, input_dir=input_dir, kwargs=kwargs),get_bounding_box_func,im) for im in to_impaths_]
assert len(imgs), "Empty image list"
keep_aspect_ratio = True
if kwargs is not None and 'keep_aspect_ratio' in kwargs and not kwargs['keep_aspect_ratio']:
keep_aspect_ratio = False
h = []
w = []
for im in imgs:
if im is not None:
h.append(im.shape[0])
w.append(im.shape[1])
assert len(h), f"Failed to read all images from {input_dir}"
avg_h = int(np.mean(h))
avg_w = int(np.mean(w))
max_h = int(np.max(h))
max_w = int(np.max(w))
if keep_aspect_ratio:
avg_h = max_h
avg_w = max_w
img2 = []
for f in imgs:
if not keep_aspect_ratio:
f = cv2.resize(f, (avg_w,avg_h))
else:
f = pad_image(f, avg_w, avg_h)
img2.append(f)
to_impaths = []
for im, imgpath2 in zip(img2, to_impaths_):
assert imgpath2 != imgpath, f"Found duplicate image {imgpath} {imgpath2}"
image_suffix = ''
imgpath2 = calc_image_path(lazy_load, save_dir, imgpath2, filename_suffix=image_suffix)
if 'enhance_image' in kwargs and kwargs['enhance_image']:
im = enhance_image(im)
fastdup_imwrite(imgpath2, im)
to_impaths.append(imgpath2)
distances = row['distance'][:MAX_IMAGES]
imgpath2 = f"{subdir}/to_image_{i}.jpg"
info_df = pd.DataFrame({'distance':distances, 'to':[lookup_filename(im, work_dir) for im in to_impaths]})
info_df['to'] = [reformat_disp_path(fid) for fid in to_impaths_]
if callable(get_reformat_filename_func):
info_df['to'] = info_df['to'].apply(lambda x: get_reformat_filename_func(x))
if 'label2' in subdf.columns:
info_df['label2'] = row['label2'][:MAX_IMAGES]
info_df = info_df.sort_values('distance',ascending=False)
info_df = info_df.set_index('distance')
h = max_width if max_width is not None else 0
w = h
if keep_aspect_ratio:
h = avg_h
w = avg_w
to_labels = None
if 'label2' in info_df.columns:
to_labels = info_df['label2'].values
sample_size= min(len(imgs), MAX_IMAGES)
to_impaths = to_impaths[:sample_size]
to_impaths.reverse()
generate_sprite_image(to_impaths, min(len(imgs), MAX_IMAGES), save_dir, to_labels, h, w, imgpath2, min(len(imgs),MAX_IMAGES), max_width=max_width)
assert os.path.exists(imgpath2), "Failed to generate sprite image " + imgpath2
# This addition should be last before exception otherwise lengths do not match in case of exception
except Exception as ex:
fastdup_capture_exception("create_similarity_gallery", ex)
print("Failed to generate viz for images", filename, ex)
imgpath = None
imgpath2 = None
info_df = None
info0_df = None
if imgpath2 is not None and imgpath is not None and info_df is not None and info0_df is not None:
img_paths2.append(imgpath2)
from_paths.append(imgpath)
info.append(info_df)
info0.append(info0_df)
import fastdup.html_writer
img_html1 = format_image_html_string(from_paths, lazy_load, max_width, save_dir)
img_html2 = format_image_html_string(img_paths2, lazy_load, None, save_dir)
subdf.insert(0, 'Query Image', img_html1)
subdf.insert(0, 'Similar', img_html2)
subdf['info_to'] = info
subdf['info_from'] = info0
if not str(save_path).endswith('.html'):
out_file = os.path.join(save_path, 'similarity.html')
else:
out_file = save_path
title = 'Similarity Report'
if slice is not None:
title += ", " + str(slice)
cols = ['info_from','info_to', 'Query Image','Similar']
#if slice is not None and slice == 'label_score':
# cols = ['score'] + cols
if callable(get_extra_col_func):
subdf['extra'] = subdf['from'].apply(lambda x: get_extra_col_func(x))
cols.append('extra')
subdf['info'] = swap_dataframe(subdf, cols)
fastdup.html_writer.write_to_html_file(subdf[cols], title, out_file, "", max_width,
jupyter_html=kwargs.get('jupyter_html', False))
assert os.path.exists(out_file), "Failed to generate out file " + out_file
print_success_msg('similar images', out_file, lazy_load)
save_artifacts = 'save_artifacts' in kwargs and kwargs['save_artifacts']
clean_images(lazy_load or save_artifacts, set(img_paths2).union(set(from_paths)), "create_similarity_gallery")
return df2 | Function to create and display a gallery of images computed by the outliers metrics Parameters: similarity_file (str): csv file with the computed image statistics by the fastdup tool, alternatively a pandas dataframe can be passed in directly. save_path (str): output folder location for the visuals num_images(int): Max number of images to display (default = 50). Be careful not to display too many images at once otherwise the notebook may go out of memory. lazy_load (boolean): If False, write all images inside html file using base64 encoding. Otherwise use lazy loading in the html to load images when mouse curser is above the image (reduced html file size). get_label_func (callable): optional function given an absolute path to an image return the image label. Image label can be a string or a list of strings. Alternatively, get_label_func can be a dictionary where the key is the absolute file name and the value is the label or list of labels. Alternatively, get_label_func can be a filename containing string label for each file. First row should be index,label. Label file should be same length and same order of the atrain_features_data.csv image list file. metric (str): Optional metric selection. One of blur, size, mean, min, max, width, height, unique. slice (str or list): Optional parameter to select a slice of the outliers file based on a specific label or a list of labels. A special value is 'label_score' which is used for comparing both images and labels of the nearest neighbors. max_width (int): Optional param to limit the image width descending (bool): Optional param to control the order of the metric get_bounding_box_func (callable): Optional parameter to allow plotting bounding boxes on top of the image. The input is an absolute path to the image and the output is a list of bounding boxes. Each bounding box should be 4 integers: x1, y1, x2, y2. 
Example of valid bounding box list: [[0, 0, 100, 100]] Alternatively, get_bounding_box_func could be a dictionary returning the bounding box list for each filename. Alternatively, get_bounding_box_func could be a csv containing index,filename,col_x,row_y,width,height or a work_dir where the file atrain_crops.csv exists get_reformat_filename_func (callable): Optional parameter to allow reformatting the filename before displaying it in the report. This is a function the user implements that gets the full file path and returns a string with the reformatted filename. get_extra_col_func (callable): Optional parameter to allow adding more image information to the report like the image label. This is a function the user implements that gets the full file path and returns html string with the label or any other metadata desired. input_dir (str): Optional parameter to specify the input directory of webdataset tar files, in case when working with webdataset tar files where the image was deleted after run using turi_param='delete_img=1' work_dir (str): Optional parameter to fastdup work_dir. Needed when similarity_file is a pd.DataFrame. min_items (int): Minimal number of items in the similarity group (optional). max_items (int): Maximal number of items in the similarity group (optional). Returns: ret (pd.DataFrame): Dataframe with the image statistics |
6,503 | import os
import pandas as pd
import cv2
import time
import numpy as np
import traceback
import shutil
import pathlib
from fastdup.image import plot_bounding_box, my_resize, get_type, imageformatter, create_triplet_img, fastdup_imread, calc_image_path, clean_images, pad_image, enhance_image, fastdup_imwrite
from fastdup.definitions import *
import re
from multiprocessing import Pool
from fastdup.sentry import *
from fastdup.utils import load_filenames, merge_with_filenames, get_bounding_box_func_helper, load_stats, load_labels, sample_from_components, calc_save_dir, convert_v1_to_v02
def print_success_msg(report_name, out_file, lazy_load):
    """Print a confirmation message after a gallery/report html file is written.

    Parameters:
        report_name (str): human-readable report name (e.g. "similar images").
        out_file (str): path of the generated html file.
        lazy_load (bool): when True the html references images on disk, so warn
            the user that the images & assets subfolders must be shipped along
            with the report.
    """
    print(f"Stored {report_name} visual view in ", out_file)
    if lazy_load:
        # Typo fix: "subolders" -> "subfolders" in the user-facing note.
        print("Note: when using lazy_load=True, the images are relative to the location of the html file. When sharing the report please make"
              " sure to include also subfolders images & assets.")
def find_label(get_label_func, df, in_col, out_col, vqa_prompt: str = None, kwargs=None):
    """Populate df[out_col] with labels derived from df[in_col].

    get_label_func is dispatched, in order, as:
      * a path to a labels file (loaded via load_labels; length must match df),
      * the name of an existing column in df,
      * a caption model name (CAPTION_MODEL_NAMES) -> generated captions,
      * the VQA model name (uses vqa_prompt) or the age model name,
      * a dict mapping df[in_col] values -> label (missing keys -> MISSING_LABEL),
      * a callable applied to each value of df[in_col].

    Returns the same dataframe with out_col added/overwritten.
    Raises AssertionError when get_label_func cannot be interpreted.
    """
    if (get_label_func is not None):
        if isinstance(get_label_func, str):
            if os.path.exists(get_label_func):
                # A labels file on disk: must align 1:1 with the rows of df.
                df_labels = load_labels(get_label_func, kwargs)
                assert len(df_labels) == len(df), f"Error: wrong length of labels file {get_label_func} expected {len(df)} got {len(df_labels)}"
                df[out_col] = df_labels['label']
            elif get_label_func in df.columns:
                # NOTE(review): copies df['label'] rather than df[get_label_func];
                # correct only when get_label_func == 'label' — confirm intent.
                df[out_col] = df['label']
            elif get_label_func in CAPTION_MODEL_NAMES:
                # Generate captions with the named model (CPU inference).
                from fastdup.captions import generate_labels
                df[out_col] = generate_labels(df[in_col], get_label_func, device='cpu')
            elif get_label_func == VQA_MODEL1_NAME:
                from fastdup.captions import generate_vqa_labels
                df[out_col] = generate_vqa_labels(df[in_col], vqa_prompt, kwargs)
            elif get_label_func == AGE_LABEL1_NAME:
                from fastdup.captions import generate_age_labels
                df[out_col] = generate_age_labels(df[in_col], kwargs)
            else:
                assert False, f"Found str label {get_label_func} but it is neither a file nor a column name in the dataframe {df.columns}"
        elif isinstance(get_label_func, dict):
            # Dict lookup per filename; unknown files get the MISSING_LABEL marker.
            df[out_col] = df[in_col].apply(lambda x: get_label_func.get(x, MISSING_LABEL))
        elif callable(get_label_func):
            assert len(df), "Empty dataframe"
            assert in_col in df.columns, f"Missing column {in_col}"
            df[out_col] = df[in_col].apply(lambda x: get_label_func(x))
        else:
            assert False, f"Failed to understand get_label_func type {type(get_label_func)}"
        if kwargs is not None and 'debug_labels' in kwargs:
            print(df.head())
    return df
def slice_df(df, slice, colname, kwargs=None):
    """Filter df to the rows whose df[colname] label matches `slice`.

    slice may be a single label (str) or a list of labels; df[colname] values
    may themselves be strings or lists of strings. kwargs flags:
      * 'split_sentence_to_label_list': lowercase+split labels before matching,
      * 'debug_labels': print label samples at each stage,
      * 'grouped': treat df[colname] values as grouped label containers.

    Returns the filtered dataframe; raises AssertionError when nothing matches
    (except for the plain string==string path, which may return an empty df).
    """
    assert len(df), "Df has no rows"
    split_sentence_to_label_list = kwargs is not None and 'split_sentence_to_label_list' in kwargs and kwargs['split_sentence_to_label_list']
    debug_labels = kwargs is not None and 'debug_labels' in kwargs and kwargs['debug_labels']
    grouped = kwargs is not None and 'grouped' in kwargs and kwargs['grouped']
    if slice is not None:
        if isinstance(slice, str):
            # cover the case labels are string or lists of strings
            if split_sentence_to_label_list:
                labels = df[colname].astype(str).apply(lambda x: split_str(x.lower())).values
                if debug_labels:
                    print('Label with split sentence', labels[:10])
            else:
                labels = df[colname].astype(str).values
                if debug_labels:
                    print('label without split sentence', labels[:10])
            # `labels` is only inspected (first element) to decide list-vs-scalar;
            # the actual filtering below runs on the original df[colname] values.
            is_list = isinstance(labels[0], list)
            if grouped:
                df = df[df[colname].apply(lambda x: slice in x)]
                assert len(df), f"Failed to find any labels with value={slice}"
            elif is_list:
                # Flatten only for the debug printout; filter is case-insensitive.
                labels = [item for sublist in labels for item in sublist]
                if debug_labels:
                    print('labels after merging sublists', labels[:10])
                df = df[df[colname].apply(lambda x: slice in [y.lower() for y in x])]
            else:
                # Exact match first, then fall back to substring containment.
                df2 = df[df[colname] == slice]
                if len(df2) == 0:
                    df2 = df[df[colname].apply(lambda x: slice in str(x))]
                df = df2
        elif isinstance(slice, list):
            if isinstance(df[colname].values[0], list):
                # Keep rows whose label list intersects the requested slice list.
                df = df[df[colname].apply(lambda x: len(set(x)&set(slice)) > 0)]
            else:
                df = df[df[colname].isin(slice)]
            assert len(df), f"Failed to find any labels with {slice}"
        else:
            assert False, "slice must be a string or a list of strings"
    return df
def lookup_filename(filename, work_dir):
    """Resolve a filename to a usable local path.

    If the file exists as given, return it unchanged. If it is a temp-folder
    relative path produced on a remote filesystem run, prefix it with work_dir.

    Parameters:
        filename (str): candidate image path (absolute, relative, or temp-relative).
        work_dir (str): fastdup working directory; required for temp-relative paths.
    """
    # Error messages previously interpolated nothing ("(unknown)"); include the
    # offending value so failures are actionable.
    assert isinstance(filename, str), f"Wrong filename type {type(filename)} for {filename}"
    if os.path.exists(filename):
        return filename
    if filename.startswith(S3_TEMP_FOLDER + get_sep()) or filename.startswith(S3_TEST_TEMP_FOLDER + get_sep()):
        assert work_dir is not None, f"Failed to find work_dir on remote_fs: filename was {filename}"
        filename = os.path.join(work_dir, filename)
    return filename
import os
# Force Qt (pulled in indirectly by image libraries) to render off-screen so
# the package works on headless servers with no display.
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
# Sanity-check the native artifacts shipped with the package and exit early
# with an actionable message instead of failing later with an obscure error.
if not os.path.exists(so_file):
    print("Failed to find shared object", so_file);
    print("Current init file is on", __file__);
    sys.exit(1)
if not os.path.exists(model_path_full):
    # Report the broken installation to telemetry before exiting.
    fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
    print("Failed to find ort model on init " + __file__)
    sys.exit(1)
def fastdup_imread(img1_path, input_dir, kwargs):
    """
    Read an image from local file, or from a tar file, or from s3/minio path using minio client mc

    Parameters:
        img1_path (str): path to the image
        input_dir (str): optional directory path in case the image is found on a webdataset in another path or found in s3

    Returns:
        img1 (np.array): the image, or None when every lookup strategy failed
    """
    assert not pd.isnull(img1_path), f"img1_path should not be None {img1_path} {input_dir}, {kwargs}"
    is_minio_or_s3 = False
    if input_dir is not None and (isinstance(input_dir, str) or isinstance(input_dir, pathlib.Path)):
        # Expand ~ and decide whether input_dir points at remote storage.
        if input_dir.startswith('~/'):
            input_dir = os.path.expanduser(input_dir)
        if not input_dir.startswith("s3://") and not input_dir.startswith("minio://"):
            assert os.path.exists(input_dir), "Failed to find input_dir: " + input_dir
        else:
            is_minio_or_s3 = True
    if img1_path.startswith('~/'):
        img1_path = os.path.expanduser(img1_path)
    # Strategy 1: plain local file.
    if os.path.exists(img1_path):
        img = inner_read(img1_path)
        return img
    # Strategy 2: image that lives inside a webdataset .tar under the temp folder.
    elif ('/' +S3_TEMP_FOLDER + '/' in img1_path or '/' + S3_TEST_TEMP_FOLDER + '/' in img1_path) and \
        '.tar/' in img1_path:
        assert os.path.exists(input_dir), "Failed to find input dir " + input_dir
        # Recover the tar path relative to input_dir from the flattened image path.
        pos = os.path.dirname(img1_path).find(input_dir.replace('/',''))
        tar_file = os.path.dirname(img1_path)[pos+len(input_dir.replace('/','')):]
        tar_file = os.path.join(input_dir, tar_file)
        if kwargs is not None and "reformat_tar_name" in kwargs and callable(kwargs['reformat_tar_name']):
            tar_file = kwargs["reformat_tar_name"](tar_file)
        print('Found tar file', tar_file)
        img_name = os.path.basename(img1_path)
        try:
            # Decode the member straight from the tar without extracting to disk.
            with tarfile.open(tar_file, "r") as tar:
                f = tar.extractfile(img_name)
                return cv2.imdecode(np.frombuffer(f.read(), np.uint8), cv2.IMREAD_COLOR)
        except Exception as ex:
            fastdup_capture_exception("fastdup_imread", ex)
            print("Error reading from tar file: ", tar_file, ex)
            return None
    # Strategy 3: download from minio/s3 into the local temp folder, then read.
    elif is_minio_or_s3 and input_dir is not None:
        if input_dir.startswith("minio://"):
            local_dir_no_temp = truncate_folder_name(os.path.dirname(img1_path))
            minio_prefix = "/".join(input_dir.replace("minio://", "").split('/')[:2])
            #print('minio_prefix', minio_prefix)
            download_minio(minio_prefix + '/' + local_dir_no_temp + '/' + os.path.basename(img1_path), S3_TEMP_FOLDER)
            ret = inner_read(os.path.join(S3_TEMP_FOLDER, os.path.basename(img1_path)))
            assert ret is not None, f"Failed to read image {os.path.join(S3_TEMP_FOLDER, os.path.basename(img1_path))}"
            return ret
        elif input_dir.startswith("s3://"):
            local_dir_no_temp = truncate_folder_name(os.path.dirname(img1_path))
            s3_prefix = 's3://' + "/".join(input_dir.replace("s3://", "").split('/')[:1])
            #print('s3_prefix', s3_prefix)
            download_s3(s3_prefix + '/' + local_dir_no_temp + '/' + os.path.basename(img1_path), S3_TEMP_FOLDER)
            ret = inner_read(os.path.join(S3_TEMP_FOLDER, os.path.basename(img1_path)))
            return ret
    # Strategy 4: path accidentally contains input_dir twice — strip one copy.
    #Failed to read image1 ..\milvus_vector_db\data\images\..\milvus_vector_db\data\images\Egyptian_Mau_210.jpg
    elif input_dir is not None and img1_path.startswith(input_dir) and len(img1_path) >= len(input_dir) +2:
        suffix = img1_path[len(input_dir):]
        if input_dir in suffix and os.path.exists(suffix):
            img = inner_read(suffix)
            return img
    elif "''" in img1_path: # try to handle french and other languages where c side doubles the '' otherwise pandas can't read it
        new_img1_path = img1_path.replace("''","")
        if os.path.exists(new_img1_path):
            img = inner_read(new_img1_path)
            return img
    print('Failed to read image from img_path', img1_path)
    return None
def imageformatter(im, max_width=None):
    """Render an image as an inline base64-encoded html <img> tag.

    Parameters:
        im: image array to embed, or None (returns the empty string).
        max_width (int): optional pixel width attribute for the tag.
    """
    if im is None:
        return ""
    encoded = image_base64(im)
    width_attr = "" if max_width is None else f' width="{max_width}"'
    return f'<img src="data:image/jpeg;base64,{encoded}"{width_attr}>'
def my_resize(img, max_width):
    """Shrink an oversized image while keeping its aspect ratio.

    The target size is max_width when given and smaller than the image width,
    otherwise 320. NOTE(review): the resize sets the new HEIGHT to the target
    and derives the width from the aspect ratio — presumably intentional,
    confirm against callers.
    """
    if img is None:
        return None
    h, w, c = get_shape(img)
    target = max_width if (max_width is not None and w > max_width) else 320
    aspect = h / w
    # Guard against degenerate shapes before dividing by the aspect ratio.
    if aspect > 0 and target > 0:
        new_w = int(target / aspect)
        if (h > target or w > target) and new_w > 0:
            img = cv2.resize(img, (new_w, target))
    return img
def write_to_html_file(df, title='', filename='out.html', stats_info = None, subtitle=None, max_width=None,
                       write_row_name=True, jupyter_html=False):
    """Write an entire dataframe to an HTML report file.

    Parameters:
        df (pd.DataFrame): one row per report component; columns become fields.
        title (str): report title; subtitle (str): optional subtitle line.
        filename (str): output html path.
        stats_info: currently unused (see commented-out block below).
        max_width (int): optional image width cap passed to the renderers.
        write_row_name (bool): whether to emit the row index name per component.
        jupyter_html (bool): emit html suitable for inline notebook display.
    """
    work_dir = os.path.dirname(filename)
    # css_dir = os.path.join(work_dir, 'css')
    # if not os.path.exists(css_dir):
    #     os.mkdir(css_dir)
    # assert os.path.exists(css_dir)
    # write_css(css_dir, max_width)
    # write_css_map(css_dir)
    # copy_assets(work_dir)
    ''' Write an entire dataframe to an HTML file with nice formatting. '''
    #if stats_info is not None:
    #    result += '<left>' + stats_info + '</left><br>'
    # Assemble the document: header, then one component per dataframe row, then footers.
    result = write_html_header(title, subtitle, max_width, jupyter_html)
    result += write_component_header()
    for i,row in df.iterrows():
        result += write_component(df.columns, row, i, max_width, write_row_name)
    result += write_components_footer()
    result += write_html_footer()
    # result += df.to_html(classes='wide', escape=False)
    # result += ''' </body>
    # </html> '''
    with open(filename, 'w') as f:
        f.write(result)
    assert os.path.exists(filename), "Failed to write file " + filename
The provided code snippet includes necessary dependencies for implementing the `do_create_aspect_ratio_gallery` function. Write a Python function `def do_create_aspect_ratio_gallery(stats_file, save_path, get_label_func=None, lazy_load=False, max_width=None, num_images=0, slice=None, get_reformat_filename_func=None, input_dir=None, **kwargs)` to solve the following problem:
Create an html gallery of images with aspect ratio stats_file: save_path: get_label_func: max_width: num_images: slice: get_reformat_filename_func: Returns:
Here is the function:
def do_create_aspect_ratio_gallery(stats_file, save_path, get_label_func=None, lazy_load=False, max_width=None, num_images=0, slice=None,
                                   get_reformat_filename_func=None, input_dir=None, **kwargs):
    '''
    Create an html gallery of images with aspect ratio statistics.

    Parameters:
        stats_file (str or pd.DataFrame): stats csv produced by fastdup, or the loaded dataframe.
        save_path (str): output folder for the html report and the plot image.
        get_label_func: optional label source (file / column / dict / callable), used with `slice`.
        lazy_load (bool): html image loading mode (affects only the success message here).
        max_width (int): optional cap on displayed image width.
        num_images (int): optional cap on the number of stats rows considered.
        slice: optional label (or list of labels) to restrict the report to.
        get_reformat_filename_func (callable): optional filename reformatting for display.
        input_dir (str): optional input dir for webdataset/s3 image lookup.

    Returns:
        result of write_to_html_file, or None when matplotlib is unavailable.
    '''
    try:
        import matplotlib.pyplot as plt
    except Exception as e:
        fastdup_capture_exception("create_aspect_ratio_gallery", e)
        print(MATPLOTLIB_ERROR_MSG)
        return None
    from .html_writer import write_to_html_file
    from .image import imageformatter
    import matplotlib.pyplot as plt

    work_dir = None
    if isinstance(stats_file, pd.DataFrame):
        df = stats_file
    else:
        work_dir = os.path.dirname(os.path.abspath(stats_file))
        df = pd.read_csv(stats_file)
        assert len(df), "Zero rows found in " + stats_file
    if num_images is not None and num_images>0:
        df = df.head(num_images)
    if get_label_func is not None:
        # BUGFIX: kwargs must be passed by keyword — find_label's 5th positional
        # parameter is vqa_prompt, so the old positional call sent kwargs there.
        df = find_label(get_label_func, df, 'filename', 'label', kwargs=kwargs)
        df = slice_df(df, slice, 'label', kwargs)
    # str() so the assert also works when stats_file is a DataFrame.
    assert len(df), "Empty stats file " + str(stats_file)
    # Drop rows where fastdup could not determine the image dimensions.
    df = df[df['width'] > DEFUALT_METRIC_ZERO]
    df = df[df['height'] > DEFUALT_METRIC_ZERO]
    shape = df[['width','height']].to_numpy()  # column 0 = widths, column 1 = heights
    max_width_ = np.max(shape[:,0])
    max_height_ = np.max(shape[:,1])
    ret = shape[:,0]/shape[:,1]
    max_dim = max(max_height_, max_width_)
    fig, axs = plt.subplots(2, 1, figsize=(10, 10))
    # Scatter: x axis carries widths, y axis carries heights.
    axs[0].scatter(shape[:,0], shape[:, 1])
    axs[0].plot(range(0, max_dim), range(0, max_dim), 'k')
    # BUGFIX: axis labels were swapped relative to the plotted data.
    axs[0].set_ylabel('Height', fontsize=13)
    axs[0].set_xlabel('Width', fontsize=13)
    axs[0].grid()
    axs[0].set_title('Scatter of images shapes', fontsize=18)
    axs[0].set_xlim([0, max_width_])
    axs[0].set_ylim([0, max_height_])
    axs[1].hist(shape[:, 0]/shape[:, 1], bins=100)
    axs[1].grid()
    axs[1].set_xlabel('Aspect Ratio', fontsize=13)
    axs[1].set_ylabel('Frequency', fontsize=13)
    axs[1].set_title('Histogram of aspect ratio for images', fontsize=18)
    axs[1].set_xlim([0, 2])
    local_fig = f"{save_path}/aspect_ratio.jpg"
    fig.savefig(local_fig ,dpi=100)
    img = cv2.imread(local_fig)
    # Pick a representative widest and tallest image for the report.
    max_width_img = df[df['width'] == max_width_]['filename'].values[0]
    max_width_img = lookup_filename(max_width_img, work_dir)
    max_height_img = df[df['height'] == max_height_]['filename'].values[0]
    max_height_img = lookup_filename(max_height_img, work_dir)
    try:
        img_max_width = fastdup_imread(max_width_img, input_dir, kwargs)
        img_max_height = fastdup_imread(max_height_img, input_dir, kwargs)
        if max_width is not None:
            img_max_width = my_resize(img_max_width, max_width)
            img_max_height = my_resize(img_max_height, max_width)
    except Exception as ex:
        # Best-effort: the report is still useful without the example images.
        print("Failed to read images ", max_width_img, max_height_img)
        fastdup_capture_exception("aspect ratio", ex)
        img_max_width = None
        img_max_height = None
    if get_reformat_filename_func is not None:
        max_width_img = get_reformat_filename_func(max_width_img)
        max_height_img = get_reformat_filename_func(max_height_img)
    # BUGFIX: averages must run over the width/height COLUMNS (shape[:, i]);
    # shape[0, :] / shape[1, :] previously averaged single rows instead.
    aspect_ratio_info = pd.DataFrame({'Number of images':[len(df)],
                                      'Avg width':[np.mean(shape[:, 0])],
                                      'Avg height':[np.mean(shape[:, 1])],
                                      'Max width': [max_width_],
                                      'Max height': [max_height_],
                                      'Plot':[imageformatter(img, None)],
                                      'Max width Image<br>' + max_width_img+ f'<br>width: {max_width_}':[imageformatter(img_max_width, max_width)],
                                      'Max height Image<br>' + max_height_img + f'<br>height: {max_height_}':[imageformatter(img_max_height, max_width)]
                                      }).T
    ret = pd.DataFrame({'stats':[aspect_ratio_info]})
    title = 'Aspect ratio report'
    out_file = os.path.join(save_path, 'aspect_ratio.html')
    print_success_msg('aspect ratio', out_file, lazy_load)
    return write_to_html_file(ret, title, out_file, None)
6,504 | import cv2
from collections import defaultdict
import json
from PIL import Image
import numpy as np
def export_to_coco(df, bbox_col, label_col, json_filename):
    """Export dataframe image annotations to a COCO-format json file.

    Parameters:
        df (pd.DataFrame): must contain a "filename" column plus the two columns below.
        bbox_col (str): column holding, per row, a list of [x1, y1, x2, y2] boxes.
        label_col (str): column holding the list of label names aligned with bbox_col.
        json_filename (str): output path for the COCO json.
    """
    # Explicit dict (not defaultdict) so "images"/"annotations"/"categories"
    # are always present in the output — an empty dataframe previously produced
    # an empty (invalid) COCO file.
    coco_format = {"images": [], "annotations": [], "categories": []}

    # Counters for unique, 1-based COCO ids.
    image_id = 0
    annotation_id = 0

    # Unique categories collected while walking the annotations.
    category_set = set()

    for _, row in df.iterrows():
        image_id += 1
        # COCO requires per-image dimensions; read them from the file on disk.
        with Image.open(row["filename"]) as img:
            width, height = img.size

        coco_format["images"].append(
            {
                "id": image_id,
                "file_name": row["filename"],
                "width": width,
                "height": height,
            }
        )

        for bbox, label in zip(row[bbox_col], row[label_col]):
            category_set.add(label)
            annotation_id += 1
            x1, y1, x2, y2 = bbox
            box_w = x2 - x1
            box_h = y2 - y1
            coco_format["annotations"].append(
                {
                    "id": annotation_id,
                    "image_id": image_id,
                    # Temporarily the label NAME; remapped to an int id below.
                    "category_id": label,
                    "bbox": [x1, y1, box_w, box_h],
                    "area": box_w * box_h,
                    "iscrowd": 0,
                }
            )

    # Deterministic 1-based category ids from the sorted label names.
    category_map = {
        name: idx for idx, name in enumerate(sorted(category_set), start=1)
    }
    coco_format["categories"] = [
        {"id": idx, "name": name} for idx, name in enumerate(sorted(category_set), start=1)
    ]
    for annotation in coco_format["annotations"]:
        annotation["category_id"] = category_map[annotation["category_id"]]

    with open(json_filename, "w") as f:
        json.dump(coco_format, f)
6,505 | import cv2
from collections import defaultdict
import json
from PIL import Image
import numpy as np
def generate_colormap(labels, hue_start=0.1, hue_end=0.9, saturation=0.9, value=0.8):
    """
    Generate a colormap for a set of unique labels while avoiding bright colors.

    Parameters:
        labels (iterable): An iterable object containing labels.
        hue_start (float): The start value of the hue range in HSV space.
        hue_end (float): The end value of the hue range in HSV space.
        saturation (float): Saturation level to set for the colors.
        value (float): Brightness level to set for the colors.

    Returns:
        dict: A dictionary mapping labels to colors in RGB format.
    """
    try:
        from matplotlib.colors import hsv_to_rgb
    except ImportError:
        # Narrow except: the previous bare "except:" also swallowed
        # KeyboardInterrupt/SystemExit.
        print("matplotlib dependency is needed please install using pip3 install matplotlib")
        raise

    unique_labels = set(labels)
    n_labels = len(unique_labels)

    # Spread hues evenly across [hue_start, hue_end]; fixed saturation/value
    # keep the palette away from overly bright colors.
    hue_range = np.linspace(hue_start, hue_end, n_labels)

    # Build the palette in HSV, then convert each color to RGB.
    colormap_hsv = np.zeros((n_labels, 3))
    colormap_hsv[:, 0] = hue_range
    colormap_hsv[:, 1] = saturation
    colormap_hsv[:, 2] = value
    colormap = [hsv_to_rgb(color) for color in colormap_hsv]

    # NOTE: set iteration order is arbitrary, so the label->color pairing is
    # not deterministic across runs (the palette itself is).
    label_to_color = {label: colormap[i] for i, label in enumerate(unique_labels)}
    return label_to_color
def annotate_image(image_path: str, annotations: dict):
    """Display an image with its annotation boxes, labels and scores drawn on top.

    Parameters:
        image_path (str): path of the image to display.
        annotations (dict): expects keys "labels", "scores" and "boxes", where
            each box is (x1, y1, x2, y2) in pixel coordinates, all three lists
            aligned index-wise.
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib.patches as patches
    except ImportError:
        # Narrow except: the previous bare "except:" also swallowed
        # KeyboardInterrupt/SystemExit.
        print("matplotlib dependency is needed please install using pip3 install matplotlib")
        raise

    # Read and display the base image.
    img = plt.imread(image_path)
    fig, ax = plt.subplots(1)
    ax.imshow(img)

    # One stable color per label so identical labels share a color.
    label_to_color = generate_colormap(annotations["labels"])

    # Draw each box with its "label | score" tag.
    for label, score, box in zip(
        annotations["labels"], annotations["scores"], annotations["boxes"]
    ):
        x1, y1, x2, y2 = box
        color = label_to_color.get(label, (0, 0, 0, 1))  # Fallback color is black
        rect = patches.Rectangle(
            (x1, y1), x2 - x1, y2 - y1, linewidth=1, edgecolor=color, facecolor="none"
        )
        ax.add_patch(rect)
        ax.text(
            x1,
            y1,
            f"{label} | {score:.2f}",
            fontsize=10,
            color="white",
            bbox=dict(facecolor=color, edgecolor=color, boxstyle="round,pad=0.5"),
        )

    # Show the image with annotations
    plt.show()
6,506 | import cv2
from collections import defaultdict
import json
from PIL import Image
import numpy as np
def generate_colormap(labels, hue_start=0.1, hue_end=0.9, saturation=0.9, value=0.8):
    """
    Generate a colormap for a set of unique labels while avoiding bright colors.

    Parameters:
        labels (iterable): An iterable object containing labels.
        hue_start (float): The start value of the hue range in HSV space.
        hue_end (float): The end value of the hue range in HSV space.
        saturation (float): Saturation level to set for the colors.
        value (float): Brightness level to set for the colors.

    Returns:
        dict: A dictionary mapping labels to colors in RGB format.
    """
    try:
        from matplotlib.colors import hsv_to_rgb
    except ImportError:
        # Narrow except: the previous bare "except:" also swallowed
        # KeyboardInterrupt/SystemExit.
        print("matplotlib dependency is needed please install using pip3 install matplotlib")
        raise

    unique_labels = set(labels)
    n_labels = len(unique_labels)

    # Spread hues evenly across [hue_start, hue_end]; fixed saturation/value
    # keep the palette away from overly bright colors.
    hue_range = np.linspace(hue_start, hue_end, n_labels)

    # Build the palette in HSV, then convert each color to RGB.
    colormap_hsv = np.zeros((n_labels, 3))
    colormap_hsv[:, 0] = hue_range
    colormap_hsv[:, 1] = saturation
    colormap_hsv[:, 2] = value
    colormap = [hsv_to_rgb(color) for color in colormap_hsv]

    # NOTE: set iteration order is arbitrary, so the label->color pairing is
    # not deterministic across runs (the palette itself is).
    label_to_color = {label: colormap[i] for i, label in enumerate(unique_labels)}
    return label_to_color
def plot_annotations(
    df,
    image_col="filename",
    bbox_col=None,
    labels_col=None,
    scores_col=None,
    tags_col=None,
    masks_col=None,
    num_rows=5,
):
    """Plot up to num_rows dataframe rows: the original image plus optional
    bounding-box and mask overlays, one figure row per dataframe row.

    Parameters:
        df (pd.DataFrame): annotation dataframe.
        image_col (str): column with the image path.
        bbox_col / labels_col / scores_col (str): columns with per-row lists of
            boxes (x1, y1, x2, y2), label names and confidence scores.
        tags_col (str): accepted for API compatibility; currently unused.
        masks_col (str): column with a tensor of binary masks per row.
        num_rows (int): maximum number of dataframe rows to plot.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        # Narrow except: keep interrupts/system exits visible.
        print("matplotlib dependency is needed please install using pip3 install matplotlib")
        raise

    df = df.head(num_rows)

    num_subplots = 1  # always show original
    if bbox_col:
        num_subplots += 1
    if masks_col:
        num_subplots += 1

    num_rows = len(df)
    # BUGFIX: squeeze=False keeps `axes` a 2D array even for a single row
    # and/or a single column, so axes[row][col] indexing is always valid;
    # the previous `axes = [axes]` wrap crashed when num_subplots == 1.
    fig, axes = plt.subplots(
        num_rows, num_subplots, figsize=(6 * num_subplots, 6 * num_rows), squeeze=False
    )

    for idx, (_, row) in enumerate(df.iterrows()):
        try:
            image = cv2.imread(row[image_col])
            image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        except Exception as e:
            print(f"Error reading image {row[image_col]}: {e}")
            continue

        # Original image
        axes[idx][0].imshow(image_rgb)
        axes[idx][0].set_title("Original Image")
        axes[idx][0].axis("off")

        subplot_idx = 1

        # Bounding boxes
        if bbox_col:
            axes[idx][subplot_idx].imshow(image_rgb)
            axes[idx][subplot_idx].set_title("Annotated Boxes")
            axes[idx][subplot_idx].axis("off")

            # Hoisted out of the per-box loop: the colormap depends only on the
            # row's label list, so recomputing it per box was redundant work.
            label_to_color = generate_colormap(row[labels_col])

            for bbox, label, score in zip(
                row[bbox_col], row[labels_col], row[scores_col]
            ):
                x_min, y_min, x_max, y_max = bbox
                color = label_to_color.get(
                    label, (0, 0, 0, 1)
                )  # Fallback color is black
                axes[idx][subplot_idx].add_patch(
                    plt.Rectangle(
                        (x_min, y_min),
                        x_max - x_min,
                        y_max - y_min,
                        linewidth=2,
                        edgecolor=color,
                        facecolor="none",
                    )
                )
                axes[idx][subplot_idx].text(
                    x_min,
                    y_min - 5,
                    f"{label} | {score:.2f}",
                    color="white",
                    bbox=dict(
                        facecolor=color, edgecolor=color, boxstyle="round,pad=0.5"
                    ),
                )
            subplot_idx += 1

        # Masks
        if masks_col:
            axes[idx][subplot_idx].imshow(image_rgb)
            axes[idx][subplot_idx].set_title("Annotated Masks")
            axes[idx][subplot_idx].axis("off")

            masks = row[masks_col].cpu().numpy()
            for mask in masks:
                # Random translucent color per mask instance.
                color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
                h, w = mask.shape[-2:]
                mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
                axes[idx][subplot_idx].imshow(mask_image, alpha=0.9)

    plt.tight_layout()
    plt.show()
6,507 | import torch
from fastdup.sentry import fastdup_capture_exception
from fastdup.definitions import MISSING_LABEL
from tqdm import tqdm
import nltk
device_to_captioner = {}
from nltk.corpus import stopwords
def format_captions(captions):
    """Keep the first 8 words of a caption, dropping English stop words.

    Args:
        captions (str): Raw caption text produced by the captioning model.

    Returns:
        str: The shortened, stop-word-free caption.
    """
    # `stop_words` was referenced but never defined at module level (only the
    # nltk `stopwords` corpus module is imported), which made this function
    # raise NameError. Build the set lazily and degrade to an empty set when
    # the nltk corpus is unavailable (captions then pass through unfiltered).
    try:
        stop_words = set(stopwords.words('english'))
    except Exception:
        stop_words = set()
    # Remove stop words
    filtered_text = ' '.join(word for word in captions.split()[:8] if word.lower() not in stop_words)
    return filtered_text
def init_captioning(model_name='automatic', device='cpu', batch_size=8, max_new_tokens=20,
                    use_float_16=True):
    """Create a BLIP image-to-text pipeline and cache it per device.

    Args:
        model_name: Unused; kept for interface compatibility with callers.
        device: 'gpu', 'cpu', or a torch device index.
        batch_size: Unused here; batching is applied at call time.
        max_new_tokens: Maximum number of tokens generated per caption.
        use_float_16: Run the model in float16 (forced off on CPU).

    Returns:
        The transformers image-to-text pipeline.

    Raises:
        Exception: re-raised when transformers/torch are not installed.
    """
    global device_to_captioner
    # Cache under the key the caller passed in: generate_labels looks the
    # captioner up with the raw value (e.g. 'cpu'), so storing under the
    # translated index (-1/0) would miss on every call and re-load the model.
    cache_key = device
    # use GPU if device is specified
    if isinstance(device, str):
        if device == 'gpu':
            device = 0
        elif device == 'cpu':
            device = -1
            use_float_16 = False  # fp16 inference is not supported on CPU
    # confirm necessary dependencies are installed, and import them
    try:
        from transformers import pipeline
        from transformers.utils import logging
        logging.set_verbosity(50)
    except Exception as e:
        fastdup_capture_exception("Auto generate labels", e)
        print("Auto captioning requires an installation of the following libraries:\n")
        print("   huggingface transformers\n   pytorch\n")
        print("   to install, use `pip3 install transformers torch`")
        raise
    model = "Salesforce/blip-image-captioning-large"
    has_gpu = torch.cuda.is_available()
    captioner = pipeline("image-to-text", model=model, device=device if has_gpu else "cpu", max_new_tokens=max_new_tokens,
                         torch_dtype=torch.float16 if use_float_16 else torch.float32)
    device_to_captioner[cache_key] = captioner
    return captioner
def fastdup_capture_exception(section, e, warn_only=False, extra=""):
    """Report exception `e` to Sentry, tagged with environment metadata.

    Prints the traceback unless `warn_only` is set; does nothing further when
    the user opted out via the SENTRY_OPT_OUT environment variable.
    """
    if not warn_only:
        traceback.print_exc()
    if 'SENTRY_OPT_OUT' in os.environ:
        return
    with sentry_sdk.push_scope() as scope:
        tags = {
            "section": section,
            "unit_test": unit_test,
            "token": token,
            "platform": platform.platform(),
            "platform.version": platform.version(),
            "python": sys.version.strip().replace("\n", " "),
            "production": "FASTDUP_PRODUCTION" in os.environ,
        }
        if extra != "":
            tags["extra"] = extra
        for tag_name, tag_value in tags.items():
            scope.set_tag(tag_name, tag_value)
        capture_exception(e, scope=scope)
MISSING_LABEL = "N/A"  # Sentinel label used when captioning fails for an image.
def generate_labels(filenames, model_name='automatic', device = 'cpu', batch_size=8, max_new_tokens=20, use_float_16=True):
    """Generate a short caption-based label for each image in `filenames`.

    Images are captioned in batches. A chunk that fails is filled with
    MISSING_LABEL; a global failure returns MISSING_LABEL for every input,
    so the returned list always has len(filenames) entries.
    """
    global device_to_captioner
    # Reuse a cached pipeline when one exists for this device key.
    if device not in device_to_captioner:
        captioner = init_captioning(model_name, device, batch_size, max_new_tokens, use_float_16)
    else:
        captioner = device_to_captioner[device]
    captions = []
    # generate captions
    try:
        for i in tqdm(range(0, len(filenames), batch_size)):
            chunk = filenames[i:i + batch_size]
            try:
                for pred in captioner(chunk, batch_size=batch_size):
                    charstring = ' '
                    # Each prediction is a list of dicts; join all generated texts.
                    caption = charstring.join([d['generated_text'] for d in pred])
                    # Split the sentence into words
                    words = caption.split()
                    # Filter out words containing '#'
                    filtered_words = [word for word in words if '#' not in word]
                    # Join the filtered words back into a sentence
                    caption = ' '.join(filtered_words)
                    caption = caption.strip()
                    caption = format_captions(caption)
                    captions.append(caption)
            except Exception as ex:
                # Per-chunk failure: keep the output aligned with the inputs.
                print("Failed to caption chunk", chunk[:5], ex)
                captions.extend([MISSING_LABEL] * len(chunk))
    except Exception as e:
        fastdup_capture_exception("Auto caption image", e)
        return [MISSING_LABEL] * len(filenames)
    return captions
6,508 | import shutil
import os
import pandas as pd
from fastdup.image import image_base64
from pathlib import Path
import numbers
from fastdup.sentry import fastdup_capture_exception
import os
# Force Qt into headless mode so fastdup can run on servers without a display.
os.environ["QT_QPA_PLATFORM"] ="offscreen"
# Expose the package directory to native code via the environment.
# NOTE(review): LOCAL_DIR is assumed to be defined earlier in this module — verify.
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
# Abort early with a clear message if the compiled shared object is missing.
# NOTE(review): so_file / model_path_full / sys come from elsewhere in this module.
if not os.path.exists(so_file):
    print("Failed to find shared object", so_file);
    print("Current init file is on", __file__);
    sys.exit(1)
# Likewise verify (and report to Sentry) that the bundled ONNX model exists.
if not os.path.exists(model_path_full):
    fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
    print("Failed to find ort model on init " + __file__)
    sys.exit(1)
def write_css_map(css_dir):
    """Write the pre-generated SCSS source map (style.css.map) into `css_dir`.

    The map content is a checked-in build artifact; it lets browser dev-tools
    trace the bundled style.css back to its SCSS sources. Do not edit by hand.
    """
    css_map = os.path.join(css_dir, 'style.css.map')
    with open(css_map, 'w') as f:
        # Verbatim sourcemap JSON (generated by the SCSS toolchain).
        f.write('''{"version":3,"sourceRoot":"","sources":["../scss/base/_base.scss","../scss/abstracts/_variables.scss","../scss/base/_fonts.scss","../scss/components/_container.scss","../scss/components/_link.scss","../scss/abstracts/_mixins.scss","../scss/components/_table.scss","../scss/layout/_hero.scss","../scss/layout/_main.scss","../scss/components/_graph.scss","../scss/components/_components.scss","../scss/components/_component.scss"],"names":[],"mappings":"AAGA;EACC;;;AAED;EACC;EACA;EACA;EACA;EACA;;AACA;EACC;;AACA;EACC;EACA;EACA;EACA;;AAIC;EACC,YCbG;;;ADmBR;AAAA;AAAA;EAGC;;;AAED;AAAA;EAEC;;;AAED;EACC,aClCM;EDmCN;EACA,OC9BY;ED+BZ;EACA;EACA,YClCO;EDmCP;;;AAED;EACC,aC3CM;ED4CN;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA;EACA,OClDY;;;ADuDb;EAEC;EACA;;;AAED;EAEC;EACA;;;AAED;EACC;EACA;EACA;;;AAED;EACC;EACA;;;AAED;AAAA;EAEC;EACA;EACA;;;AAED;EACC;;;AAED;EACC;EACA,OCrFY;;;ADuFb;EACC;EACA,OCzFY;ED0FZ;;;AAED;EACC;EACA,OC9FY;ED+FZ;;;AAED;EACC;EACA,OCnGY;;;ADqGb;EACC;;;AAED;EACC;;;AAED;EACC;;;AAED;EACC;;;AEvHD;EACC;EACA;EACA;EACA;EACA;;AAGD;EACC;EACA;EACA;EACA;EACA;;AAGD;EACC;EACA;EACA;EACA;EACA;;ACxBD;EACC;EACA;;;AAGD;EACC;IACC;;;AAIF;EACC;IACC;;;AAIF;EACC;IACC;;;AAIF;EACC;IACC;IACA;;;AC1BF;EACC,OHaY;EGZZ;EACA;;AACA;ECcA;EACA;EACA,MDfiB;ECgBjB;EACA,ODjBoB;ECkBpB,QDlB0B;ECmB1B;EDlBC,kBHQW;EGPX;EACA;EACA;EACA;;AAED;EACC;;AAED;EACC;;;AEhBF;EACC;EACA;EACA;EACA;;AACA;EDAA;EACA,gBAF4B;EAG5B,iBAH6E;EAI7E,aAJ+C;EAK/C,WALqG;;ACGpG;EACC;;AAED;EACC,YLMS;;AKJV;EACC;;AAED;EACC;;AAGF;EACC;EACA;EACA;EACA,YLRU;EKSV,OLfM;EKgBN;EDbD,WCce;EDbf,aCauB;EACtB;EACA;;AAED;EACC;EACA;EACA;EACA;EDtBD,WCuBe;EDtBf,aCsBuB;EACtB;EACA,OLzBS;EK0BT;EACA;EACA;;AACA;EACC;EACA;;;AC5CH;EACC;;AACA;EACC;EACA;EACA;EACA;;AAED;EACC;;AAED;EACC;EFPD;EACA,gBEOe;EFNf,iBAH6E;EAI7E,aEKuB;EFJvB,WALqG;;AEUpG;EACC;EACA;;AAED;EFLD,WEMgB;EFLhB,aEKwB;EACtB;EACA;EACA;EACA;;AAED;EACC;EFbF,WEcgB;EFbhB,aEasB;EACpB;EACA,ONnBK;;;AOVR;EACC,YPcY;;;AQdZ;EACC;EACA;EACA;;AAMD;EACC;;;ACXF;EACC;EACA;;;ACFD;EACC;EACA;EACA;EACA;;AACA;
EACC;;AAED;EACC;EACA;;AACA;EACC;EACA;EACA;EACA;EACA;ENHF,WMIgB;ENHhB,aMGwB;EACtB,OVLQ;EUMR;;AAGF;EACC;EACA;EACA;EACA;EACA;;AACA;EACC;EACA;EACA;EACA;EACA,OVvBK;EIGP,WMqBgB;ENpBhB,aMoBwB;EACtB;;AAED;EACC;EACA","file":"style.css"}''')
6,509 | import shutil
import os
import pandas as pd
from fastdup.image import image_base64
from pathlib import Path
import numbers
LOCAL_DIR = os.path.dirname(__file__)
from fastdup.sentry import fastdup_capture_exception
import os
os.environ["QT_QPA_PLATFORM"] ="offscreen"
os.environ['FASTDUP_LOCAL_DIR'] = LOCAL_DIR
# Abort early if the compiled shared object shipped with the package is missing.
# NOTE(review): so_file / model_path_full / sys come from elsewhere in this module.
if not os.path.exists(so_file):
    print("Failed to find shared object", so_file);
    print("Current init file is on", __file__);
    sys.exit(1)
# Likewise abort (and report to Sentry) if the bundled ONNX model is missing.
if not os.path.exists(model_path_full):
    fastdup_capture_exception("Bad Install", RuntimeError("Failed to find ort model on init " + __file__))
    print("Failed to find ort model on init " + __file__)
    sys.exit(1)
def copy_assets(work_dir):
    """Copy the packaged report assets (fonts, logos, ...) into `work_dir`/assets.

    Sanity-asserts before and after the copy that the expected directories
    and logo files are present.
    """
    assert os.path.exists(work_dir)
    assets_dir = os.path.join(work_dir, 'assets')
    if not os.path.exists(assets_dir):
        os.mkdir(assets_dir)
    assert os.path.exists(assets_dir)
    local_assets = os.path.join(LOCAL_DIR, 'assets')
    assert os.path.exists(local_assets) and os.path.isdir(local_assets)
    shutil.copytree(local_assets, assets_dir, dirs_exist_ok=True)
    # Verify the copy produced the expected layout.
    for subdir in ('fonts', 'logo'):
        copied = os.path.join(assets_dir, subdir)
        assert os.path.exists(copied) and os.path.isdir(copied)
    for svg_name in ('logo.svg', 'corner.svg'):
        assert os.path.exists(os.path.join(assets_dir, 'logo', svg_name))
6,510 | import numpy as np
import pandas as pd
import torch
from torch.nn import Linear, Module, Parameter, ReLU, Sequential
from torch.nn.functional import cross_entropy
from torch.optim import Adam
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
from ctgan.data_transformer import DataTransformer
from ctgan.synthesizers.base import BaseSynthesizer, random_state
def _loss_function(recon_x, x, sigmas, mu, logvar, output_info, factor):
st = 0
loss = []
for column_info in output_info:
for span_info in column_info:
if span_info.activation_fn != 'softmax':
ed = st + span_info.dim
std = sigmas[st]
eq = x[:, st] - torch.tanh(recon_x[:, st])
loss.append((eq ** 2 / 2 / (std ** 2)).sum())
loss.append(torch.log(std) * x.size()[0])
st = ed
else:
ed = st + span_info.dim
loss.append(cross_entropy(
recon_x[:, st:ed], torch.argmax(x[:, st:ed], dim=-1), reduction='sum'))
st = ed
assert st == recon_x.size()[1]
KLD = -0.5 * torch.sum(1 + logvar - mu**2 - logvar.exp())
return sum(loss) * factor / x.size()[0], KLD / x.size()[0] | null |
6,511 | import contextlib
import numpy as np
import torch
@contextlib.contextmanager
def set_random_states(random_state, set_model_random_state):
    """Context manager for managing the random state.

    The `@contextlib.contextmanager` decorator was missing even though this
    generator is consumed with `with` (see `random_state` below), which made
    every use fail with AttributeError on __enter__.

    Args:
        random_state (tuple):
            A tuple of (numpy.random.RandomState, torch.Generator) to install
            for the duration of the block.
        set_model_random_state (function):
            Callback that receives the advanced (RandomState, Generator) pair
            when the block exits.
    """
    # Save the ambient global states so they can be restored on exit.
    original_np_state = np.random.get_state()
    original_torch_state = torch.get_rng_state()
    random_np_state, random_torch_state = random_state
    np.random.set_state(random_np_state.get_state())
    torch.set_rng_state(random_torch_state.get_state())
    try:
        yield
    finally:
        # Hand the states as advanced inside the block back to the model,
        # then restore the caller's global RNG states.
        current_np_state = np.random.RandomState()
        current_np_state.set_state(np.random.get_state())
        current_torch_state = torch.Generator()
        current_torch_state.set_state(torch.get_rng_state())
        set_model_random_state((current_np_state, current_torch_state))
        np.random.set_state(original_np_state)
        torch.set_rng_state(original_torch_state)
The provided code snippet includes necessary dependencies for implementing the `random_state` function. Write a Python function `def random_state(function)` to solve the following problem:
Set the random state before calling the function. Args: function (Callable): The function to wrap around.
Here is the function:
def random_state(function):
    """Set the random state before calling the function.

    Args:
        function (Callable):
            The function to wrap around.
    """
    def wrapper(self, *args, **kwargs):
        states = self.random_states
        # No stored state: call straight through.
        if states is None:
            return function(self, *args, **kwargs)
        # Otherwise run under the stored RNG states and capture the
        # advanced states back onto the model afterwards.
        with set_random_states(states, self.set_random_state):
            return function(self, *args, **kwargs)
    return wrapper
6,512 | import argparse
from ctgan.data import read_csv, read_tsv, write_tsv
from ctgan.synthesizers.ctgan import CTGAN
def _parse_args():
    """Build and parse the CTGAN command-line interface arguments."""
    parser = argparse.ArgumentParser(description='CTGAN Command Line Interface')
    parser.add_argument('-e', '--epochs', default=300, type=int,
                        help='Number of training epochs')
    parser.add_argument('-t', '--tsv', action='store_true',
                        help='Load data in TSV format instead of CSV')
    parser.add_argument('--no-header', dest='header', action='store_false',
                        help='The CSV file has no header. Discrete columns will be indices.')
    parser.add_argument('-m', '--metadata', help='Path to the metadata')
    parser.add_argument('-d', '--discrete',
                        help='Comma separated list of discrete columns without whitespaces.')
    parser.add_argument('-n', '--num-samples', type=int,
                        help='Number of rows to sample. Defaults to the training data size')
    parser.add_argument('--generator_lr', type=float, default=2e-4,
                        help='Learning rate for the generator.')
    parser.add_argument('--discriminator_lr', type=float, default=2e-4,
                        help='Learning rate for the discriminator.')
    parser.add_argument('--generator_decay', type=float, default=1e-6,
                        help='Weight decay for the generator.')
    parser.add_argument('--discriminator_decay', type=float, default=0,
                        help='Weight decay for the discriminator.')
    parser.add_argument('--embedding_dim', type=int, default=128,
                        help='Dimension of input z to the generator.')
    parser.add_argument('--generator_dim', type=str, default='256,256',
                        help='Dimension of each generator layer. '
                        'Comma separated integers with no whitespaces.')
    parser.add_argument('--discriminator_dim', type=str, default='256,256',
                        help='Dimension of each discriminator layer. '
                        'Comma separated integers with no whitespaces.')
    parser.add_argument('--batch_size', type=int, default=500,
                        help='Batch size. Must be an even number.')
    parser.add_argument('--save', default=None, type=str,
                        help='A filename to save the trained synthesizer.')
    parser.add_argument('--load', default=None, type=str,
                        help='A filename to load a trained synthesizer.')
    parser.add_argument('--sample_condition_column', default=None, type=str,
                        help='Select a discrete column name.')
    parser.add_argument('--sample_condition_column_value', default=None, type=str,
                        help='Specify the value of the selected discrete column.')
    # Positional arguments: input data and output destination.
    parser.add_argument('data', help='Path to training data')
    parser.add_argument('output', help='Path of the output file')
    return parser.parse_args()
6,513 | import json
import numpy as np
import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `read_csv` function. Write a Python function `def read_csv(csv_filename, meta_filename=None, header=True, discrete=None)` to solve the following problem:
Read a csv file.
Here is the function:
def read_csv(csv_filename, meta_filename=None, header=True, discrete=None):
    """Read a csv file."""
    frame = pd.read_csv(csv_filename, header='infer' if header else None)
    discrete_columns = []
    if meta_filename:
        # Metadata file wins: every non-continuous column is discrete.
        with open(meta_filename) as meta_file:
            metadata = json.load(meta_file)
        discrete_columns = [
            column['name']
            for column in metadata['columns']
            if column['type'] != 'continuous'
        ]
    elif discrete:
        discrete_columns = discrete.split(',')
        if not header:
            # Without a header the columns are addressed by integer index.
            discrete_columns = [int(index) for index in discrete_columns]
    return frame, discrete_columns
6,514 | import json
import numpy as np
import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `read_tsv` function. Write a Python function `def read_tsv(data_filename, meta_filename)` to solve the following problem:
Read a tsv file.
Here is the function:
def read_tsv(data_filename, meta_filename):
    """Read a tsv file."""
    # Parse the metadata: one line per column, 'C lo hi' for continuous or
    # 'D cat1 cat2 ...' for discrete (braces are tolerated as whitespace).
    with open(meta_filename) as meta_file:
        raw_lines = meta_file.readlines()
    continuous = []
    discrete = []
    column_info = []
    for idx, line in enumerate(raw_lines):
        tokens = line.replace('{', ' ').replace('}', ' ').split()
        if tokens[0] == 'C':
            continuous.append(idx)
            column_info.append((float(tokens[1]), float(tokens[2])))
        else:
            assert tokens[0] == 'D'
            discrete.append(idx)
            column_info.append(tokens[1:])
    # Parse the data: continuous values pass through as strings (cast by
    # numpy below); discrete values become their category index.
    rows = []
    with open(data_filename) as data_file:
        for line in data_file.readlines():
            parsed = []
            for idx, value in enumerate(line.split()):
                if idx in continuous:
                    parsed.append(value)
                else:
                    assert idx in discrete
                    parsed.append(column_info[idx].index(value))
            rows.append(parsed)
    return np.asarray(rows, dtype='float32'), discrete
6,515 | import json
import numpy as np
import pandas as pd
The provided code snippet includes necessary dependencies for implementing the `write_tsv` function. Write a Python function `def write_tsv(data, meta, output_filename)` to solve the following problem:
Write to a tsv file.
Here is the function:
def write_tsv(data, meta, output_filename):
    """Write to a tsv file."""
    continuous = meta['continuous_columns']
    discrete = meta['discrete_columns']
    with open(output_filename, 'w') as out:
        for row in data:
            for idx, value in enumerate(row):
                if idx in continuous:
                    # Continuous values are written as-is.
                    out.write('{} '.format(value))
                else:
                    assert idx in discrete
                    # Discrete values are written as their category name.
                    out.write('{} '.format(meta['column_info'][idx][int(value)]))
            out.write('\n')
6,516 | import pandas as pd
DEMO_URL = 'http://ctgan-demo.s3.amazonaws.com/census.csv.gz'
The provided code snippet includes necessary dependencies for implementing the `load_demo` function. Write a Python function `def load_demo()` to solve the following problem:
Load the demo.
Here is the function:
def load_demo():
    """Load the demo."""
    # Downloads the census demo dataset from S3 on every call
    # (network access required; result is not cached locally).
    return pd.read_csv(DEMO_URL, compression='gzip')
6,517 | import inspect
import operator
import os
import shutil
import stat
import sys
from pathlib import Path
import tomli
from invoke import task
from packaging.requirements import Requirement
from packaging.version import Version
def readme(c):
    """Execute the README.md code blocks in a scratch directory to verify them.

    Copies README.md into tests/readme_test, runs it with rundoc, and removes
    the scratch directory on success. The caller's working directory is always
    restored, even when the rundoc run fails.
    """
    test_path = Path('tests/readme_test')
    if test_path.exists() and test_path.is_dir():
        shutil.rmtree(test_path)
    cwd = os.getcwd()
    os.makedirs(test_path, exist_ok=True)
    shutil.copy('README.md', test_path / 'README.md')
    os.chdir(test_path)
    try:
        c.run('rundoc run --single-session python3 -t python3 README.md')
    finally:
        # Restore the working directory even on failure so later tasks
        # don't execute from inside the scratch directory.
        os.chdir(cwd)
    shutil.rmtree(test_path)
6,518 | import inspect
import operator
import os
import shutil
import stat
import sys
from pathlib import Path
import tomli
from invoke import task
from packaging.requirements import Requirement
from packaging.version import Version
def check_dependencies(c):
    """Verify that the installed package set has no broken requirements."""
    c.run('python -m pip check')
def unit(c):
    """Run the unit test suite with coverage reporting."""
    c.run('python -m pytest ./tests/unit --cov=ctgan --cov-report=xml')
def integration(c):
    """Run the integration tests, retrying flaky failures up to 3 times."""
    c.run('python -m pytest ./tests/integration --reruns 3')
def install_minimum(c):
    """Install the minimum supported version of every declared dependency."""
    with open('pyproject.toml', 'rb') as pyproject_file:
        pyproject_data = tomli.load(pyproject_file)
    dependencies = pyproject_data.get('project', {}).get('dependencies', [])
    # e.g. "3.11" — used to resolve python-version-conditional requirements.
    python_version = '.'.join(map(str, sys.version_info[:2]))
    # NOTE(review): _get_minimum_versions is defined elsewhere in this file — verify.
    minimum_versions = _get_minimum_versions(dependencies, python_version)
    if minimum_versions:
        c.run(f'python -m pip install {" ".join(minimum_versions)}')
def minimum(c):
    """Run the full test suite against minimum dependency versions."""
    install_minimum(c)
    check_dependencies(c)
    unit(c)
    integration(c)
6,519 | import inspect
import operator
import os
import shutil
import stat
import sys
from pathlib import Path
import tomli
from invoke import task
from packaging.requirements import Requirement
from packaging.version import Version
def check_dependencies(c):
    """Verify that the installed package set has no broken requirements."""
    c.run('python -m pip check')
def lint(c):
    """Run the style checkers (flake8 + isort) over the package and tests."""
    check_dependencies(c)
    c.run('flake8 ctgan')
    c.run('flake8 tests --ignore=D101')
    c.run('isort -c --recursive ctgan tests')
6,520 | import inspect
import operator
import os
import shutil
import stat
import sys
from pathlib import Path
import tomli
from invoke import task
from packaging.requirements import Requirement
from packaging.version import Version
def remove_readonly(func, path, _):
    "Clear the readonly bit and reattempt the removal"
    # Windows marks some checked-out files read-only; make the path writable,
    # then retry the os function (`func`) that rmtree failed on.
    os.chmod(path, stat.S_IWRITE)
    func(path)
def rmdir(c, path):
    # Best-effort recursive delete: the tree is left in place when
    # permissions still block removal after the read-only fix-up.
    try:
        shutil.rmtree(path, onerror=remove_readonly)
    except PermissionError:
        pass
6,521 | from packaging.version import Version, parse
from types import ModuleType
import numpy as np
import torch
import os
import warnings
import shutil
from hummingbird.ml.exceptions import ConstantError
def onnx_ml_tools_installed():
    """
    Checks that *ONNXMLTools* is available.
    """
    try:
        import onnxmltools  # noqa: F401
    except ImportError:
        print("ONNXMLTOOLS not installed. Please check https://github.com/onnx/onnxmltools for instructions.")
        return False
    return True
def assert_onnx_ml_tools_installed():
    assert (
        onnx_ml_tools_installed()
    ), "ONNXMLTOOLS not installed. Please check https://github.com/onnx/onnxmltools for instructions."
6,522 | from packaging.version import Version, parse
from types import ModuleType
import numpy as np
import torch
import os
import warnings
import shutil
from hummingbird.ml.exceptions import ConstantError
def onnx_runtime_installed():
    """
    Checks that *ONNX Runtime* is available.
    """
    # The original definition had an empty body (a syntax error as written);
    # implement it consistently with the sibling *_installed checkers.
    try:
        import onnxruntime  # noqa: F401
        return True
    except ImportError:
        return False
def assert_onnx_runtime_installed():
    assert onnx_runtime_installed()
6,523 | from packaging.version import Version, parse
from types import ModuleType
import numpy as np
import torch
import os
import warnings
import shutil
from hummingbird.ml.exceptions import ConstantError
def sparkml_installed():
    """
    Checks that *Spark ML/PySpark* is available.
    """
    try:
        import pyspark  # noqa: F401
    except ImportError:
        return False
    return True
def assert_sparkml_installed():
    assert sparkml_installed()
6,524 | from packaging.version import Version, parse
from types import ModuleType
import numpy as np
import torch
import os
import warnings
import shutil
from hummingbird.ml.exceptions import ConstantError
def sklearn_installed():
    """
    Checks that *Sklearn* is available.
    """
    try:
        import sklearn  # noqa: F401
    except ImportError:
        return False
    return True
def assert_sklearn_installed():
    assert sklearn_installed()
6,525 | from packaging.version import Version, parse
from types import ModuleType
import numpy as np
import torch
import os
import warnings
import shutil
from hummingbird.ml.exceptions import ConstantError
def tvm_installed():
    """
    Checks that *TVM* is available.
    """
    try:
        import tvm  # noqa: F401
        return True
    except ImportError:
        return False
def assert_tvm_installed():
    assert tvm_installed()
6,526 | from packaging.version import Version, parse
from types import ModuleType
import numpy as np
import torch
import os
import warnings
import shutil
from hummingbird.ml.exceptions import ConstantError
def pandas_installed():
    """
    Checks that *Pandas* is available.
    """
    try:
        import pandas  # noqa: F401
        return True
    except ImportError:
        return False
def assert_pandas_installed():
    assert pandas_installed()
6,527 | from packaging.version import Version, parse
from types import ModuleType
import numpy as np
import torch
import os
import warnings
import shutil
from hummingbird.ml.exceptions import ConstantError
def is_on_github_actions():
    """Return True when running inside a GitHub Actions CI job."""
    return all(variable in os.environ for variable in ("CI", "GITHUB_RUN_ID"))
6,528 | from packaging.version import Version, parse
from types import ModuleType
import numpy as np
import torch
import os
import warnings
import shutil
from hummingbird.ml.exceptions import ConstantError
The provided code snippet includes necessary dependencies for implementing the `get_device` function. Write a Python function `def get_device(model)` to solve the following problem:
Convenient function used to get the runtime device for the model.
Here is the function:
def get_device(model):
    """
    Convenient function used to get the runtime device for the model.
    """
    assert issubclass(model.__class__, torch.nn.Module)
    params = list(model.parameters())
    if not params:
        # Parameter-free model: no device can be inferred.
        return None
    # All parameters are assumed to live on a single device.
    return params[0].device
6,529 | from packaging.version import Version, parse
from types import ModuleType
import numpy as np
import torch
import os
import warnings
import shutil
from hummingbird.ml.exceptions import ConstantError
The provided code snippet includes necessary dependencies for implementing the `dump_versions` function. Write a Python function `def dump_versions(*args)` to solve the following problem:
Utility function used to generate a string containing the versions of the main modules used to convert a model.
Here is the function:
def dump_versions(*args):
    """
    Utility function used to generate a string containing the versions of the main modules used to convert a model.
    """
    lines = []
    for module in args:
        assert isinstance(module, ModuleType)
        lines.append("{}={}".format(module.__name__, module.__version__))
    return "\n".join(lines)
6,530 | from packaging.version import Version, parse
from types import ModuleType
import numpy as np
import torch
import os
import warnings
import shutil
from hummingbird.ml.exceptions import ConstantError
The provided code snippet includes necessary dependencies for implementing the `check_dumped_versions` function. Write a Python function `def check_dumped_versions(configurations, *args)` to solve the following problem:
When a model is loaded this function is used to check that the versions of the modules used at saving time match with the version at loading time.
Here is the function:
def check_dumped_versions(configurations, *args):
    """
    When a model is loaded this function is used to check that the versions of the modules used at saving time match with the version at loading time.
    """
    entries = [entry.strip() for entry in configurations]
    saved_versions = {}
    for entry in entries:
        parts = entry.split("=")
        saved_versions[parts[0]] = parts[1]
    if len(saved_versions) != len(args):
        warnings.warn(
            "Loaded model contains an unexpected number of versions. You are probably loading a model coming from a different Hummingbird version."
        )
    for module in args:
        assert isinstance(module, ModuleType)
        name = module.__name__
        if name not in saved_versions:
            warnings.warn(
                "Module {} expected but not found. You are probably loading a model from a different version of Hummingbird.".format(
                    name
                )
            )
            continue
        saved = saved_versions[name]
        if parse(saved) != parse(module.__version__):
            warnings.warn(
                "Version of {} used to save the model ({}) is different than the current version ({}).".format(
                    name, saved, module.__version__
                )
            )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.