| id | prompt | docstring |
|---|---|---|
3,869 | from setuptools import find_packages, setup
import os
import subprocess
import sys
import time
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
version_file = 'basicsr/version.py'
def get_hash():
    # The body was missing here; a minimal sketch of the upstream helper,
    # which asks git for the current commit and falls back to 'unknown'
    # when git or the repository is unavailable.
    try:
        out = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')[:7]
    except (OSError, subprocess.CalledProcessError):
        sha = 'unknown'
    return sha
def write_version_py():
content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
short_version = '{}'
version_info = ({})
"""
sha = get_hash()
with open('VERSION', 'r') as f:
SHORT_VERSION = f.read().strip()
VERSION_INFO = ', '.join(
[x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])
VERSION = SHORT_VERSION + '+' + sha
version_file_str = content.format(time.asctime(), VERSION, SHORT_VERSION,
VERSION_INFO)
with open(version_file, 'w') as f:
f.write(version_file_str) | null |
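For illustration, `write_version_py` emits a file like the following when the `VERSION` file contains `1.2.0` and the short git hash is `abc1234` (both values hypothetical):

```python
# GENERATED VERSION FILE
# TIME: Mon Jan  1 00:00:00 2024
__version__ = '1.2.0+abc1234'
short_version = '1.2.0'
version_info = (1, 2, 0)
```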
3,870 | from setuptools import find_packages, setup
import os
import subprocess
import sys
import time
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
version_file = 'basicsr/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__'] | null |
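The `exec`/`locals()` idiom above works in CPython but is fragile. A sketch of an equivalent that execs into an explicit namespace dict (the function name here is ours, not the library's):

```python
def get_version_from_file(version_file='basicsr/version.py'):
    # Same effect as get_version() above, but with an explicit namespace
    # dict, which is more robust across Python implementations.
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
```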
3,871 | from setuptools import find_packages, setup
import os
import subprocess
import sys
import time
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def make_cuda_ext(name, module, sources, sources_cuda=None):
if sources_cuda is None:
sources_cuda = []
define_macros = []
extra_compile_args = {'cxx': []}
if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
define_macros += [('WITH_CUDA', None)]
extension = CUDAExtension
extra_compile_args['nvcc'] = [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
sources += sources_cuda
else:
print(f'Compiling {name} without CUDA')
extension = CppExtension
return extension(
name=f'{module}.{name}',
sources=[os.path.join(*module.split('.'), p) for p in sources],
define_macros=define_macros,
extra_compile_args=extra_compile_args) | null |
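A minimal sketch of how `make_cuda_ext` is typically wired into `setup()`; the extension name, module path, and source files below are placeholders, not the repository's actual ops:

```python
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension

setup(
    name='basicsr',
    ext_modules=[
        make_cuda_ext(
            name='example_ext',                       # hypothetical
            module='basicsr.models.ops.example',      # hypothetical
            sources=['src/example_ext.cpp'],
            sources_cuda=['src/example_cuda_kernel.cu']),
    ],
    cmdclass={'build_ext': BuildExtension},
    zip_safe=False)
```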
3,872 | from setuptools import find_packages, setup
import os
import subprocess
import sys
import time
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def get_requirements(filename='requirements.txt'):
    # NOTE: this early return short-circuits the function, so the
    # requirements-file parsing below it is unreachable as written.
    return []
here = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(here, filename), 'r') as f:
requires = [line.replace('\n', '') for line in f.readlines()]
return requires | null |
3,873 | import argparse
import datetime
import logging
import math
import random
import time
import torch
from os import path as osp
from basicsr.data import create_dataloader, create_dataset
from basicsr.data.data_sampler import EnlargedSampler
from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from basicsr.models import create_model
from basicsr.utils import (MessageLogger, check_resume, get_env_info,
get_root_logger, get_time_str, init_tb_logger,
init_wandb_logger, make_exp_dirs, mkdir_and_rename,
set_random_seed)
from basicsr.utils.dist_util import get_dist_info, init_dist
from basicsr.utils.options import dict2str, parse
import torch.distributed as dist
import torch.multiprocessing as mp
def init_dist(launcher, backend='nccl', **kwargs):
    # _init_dist_pytorch and _init_dist_slurm are helpers defined alongside
    # this function in basicsr.utils.dist_util
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method('spawn')
if launcher == 'pytorch':
_init_dist_pytorch(backend, **kwargs)
elif launcher == 'slurm':
_init_dist_slurm(backend, **kwargs)
else:
raise ValueError(f'Invalid launcher type: {launcher}')
def get_dist_info():
if dist.is_available():
initialized = dist.is_initialized()
else:
initialized = False
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
import yaml
def parse(opt_path, is_train=True):
"""Parse option file.
Args:
opt_path (str): Option file path.
        is_train (bool): Indicate whether in training or not. Default: True.
Returns:
(dict): Options.
"""
    with open(opt_path, mode='r') as f:
        # ordered_yaml() (defined in basicsr.utils.options) returns an
        # OrderedDict-aware YAML Loader/Dumper pair
        Loader, _ = ordered_yaml()
opt = yaml.load(f, Loader=Loader)
opt['is_train'] = is_train
# datasets
if 'datasets' in opt:
for phase, dataset in opt['datasets'].items():
# for several datasets, e.g., test_1, test_2
phase = phase.split('_')[0]
dataset['phase'] = phase
if 'scale' in opt:
dataset['scale'] = opt['scale']
if dataset.get('dataroot_gt') is not None:
dataset['dataroot_gt'] = osp.expanduser(dataset['dataroot_gt'])
if dataset.get('dataroot_lq') is not None:
dataset['dataroot_lq'] = osp.expanduser(dataset['dataroot_lq'])
# paths
for key, val in opt['path'].items():
if (val is not None) and ('resume_state' in key
or 'pretrain_network' in key):
opt['path'][key] = osp.expanduser(val)
opt['path']['root'] = osp.abspath(
osp.join(__file__, osp.pardir, osp.pardir, osp.pardir))
if is_train:
experiments_root = osp.join(opt['path']['root'], 'experiments',
opt['name'])
opt['path']['experiments_root'] = experiments_root
opt['path']['models'] = osp.join(experiments_root, 'models')
opt['path']['training_states'] = osp.join(experiments_root,
'training_states')
opt['path']['log'] = experiments_root
opt['path']['visualization'] = osp.join(experiments_root,
'visualization')
# change some options for debug mode
if 'debug' in opt['name']:
if 'val' in opt:
opt['val']['val_freq'] = 8
opt['logger']['print_freq'] = 1
opt['logger']['save_checkpoint_freq'] = 8
else: # test
results_root = osp.join(opt['path']['root'], 'results', opt['name'])
opt['path']['results_root'] = results_root
opt['path']['log'] = results_root
opt['path']['visualization'] = osp.join(results_root, 'visualization')
return opt
def parse_options(is_train=True):
parser = argparse.ArgumentParser()
parser.add_argument(
'-opt', type=str, required=True, help='Path to option YAML file.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--input_path', type=str, required=False, help='The path to the input image. For single image inference only.')
parser.add_argument('--output_path', type=str, required=False, help='The path to the output image. For single image inference only.')
args = parser.parse_args()
opt = parse(args.opt, is_train=is_train)
# distributed settings
if args.launcher == 'none':
opt['dist'] = False
print('Disable distributed.', flush=True)
else:
opt['dist'] = True
if args.launcher == 'slurm' and 'dist_params' in opt:
init_dist(args.launcher, **opt['dist_params'])
else:
init_dist(args.launcher)
print('init dist .. ', args.launcher)
opt['rank'], opt['world_size'] = get_dist_info()
# random seed
seed = opt.get('manual_seed')
if seed is None:
seed = random.randint(1, 10000)
opt['manual_seed'] = seed
set_random_seed(seed + opt['rank'])
if args.input_path is not None and args.output_path is not None:
opt['img_path'] = {
'input_img': args.input_path,
'output_img': args.output_path
}
return opt | null |
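A quick way to exercise `parse_options` without a shell, assuming a valid BasicSR option YAML exists at the (hypothetical) path below:

```python
import sys

# argparse reads sys.argv, so we can fake a command line in-process
sys.argv = ['basicsr/train.py', '-opt', 'options/example.yml',
            '--input_path', 'input.png', '--output_path', 'output.png']
opt = parse_options(is_train=False)
print(opt['manual_seed'], opt['img_path'])
```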
3,874 | import argparse
import datetime
import logging
import math
import random
import time
import torch
from os import path as osp
from basicsr.data import create_dataloader, create_dataset
from basicsr.data.data_sampler import EnlargedSampler
from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from basicsr.models import create_model
from basicsr.utils import (MessageLogger, check_resume, get_env_info,
get_root_logger, get_time_str, init_tb_logger,
init_wandb_logger, make_exp_dirs, mkdir_and_rename,
set_random_seed)
from basicsr.utils.dist_util import get_dist_info, init_dist
from basicsr.utils.options import dict2str, parse
def dict2str(opt, indent_level=1):
"""dict to string for printing options.
Args:
opt (dict): Option dict.
indent_level (int): Indent level. Default: 1.
Return:
(str): Option string for printing.
"""
msg = '\n'
for k, v in opt.items():
if isinstance(v, dict):
msg += ' ' * (indent_level * 2) + k + ':['
msg += dict2str(v, indent_level + 1)
msg += ' ' * (indent_level * 2) + ']\n'
else:
msg += ' ' * (indent_level * 2) + k + ': ' + str(v) + '\n'
return msg
def init_loggers(opt):
log_file = osp.join(opt['path']['log'],
f"train_{opt['name']}_{get_time_str()}.log")
logger = get_root_logger(
logger_name='basicsr', log_level=logging.INFO, log_file=log_file)
logger.info(get_env_info())
logger.info(dict2str(opt))
# initialize wandb logger before tensorboard logger to allow proper sync:
if (opt['logger'].get('wandb')
is not None) and (opt['logger']['wandb'].get('project')
is not None) and ('debug' not in opt['name']):
assert opt['logger'].get('use_tb_logger') is True, (
'should turn on tensorboard when using wandb')
init_wandb_logger(opt)
tb_logger = None
if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name']:
# tb_logger = init_tb_logger(log_dir=f'./logs/{opt['name']}') #mkdir logs @CLY
tb_logger = init_tb_logger(log_dir=osp.join('logs', opt['name']))
return logger, tb_logger | null |
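For reference, `dict2str` renders nested dicts like this:

```python
opt = {'name': 'demo', 'train': {'lr': 2e-4, 'total_iter': 200000}}
print(dict2str(opt))
# prints (after a leading newline):
#   name: demo
#   train:[
#     lr: 0.0002
#     total_iter: 200000
#   ]
```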
3,875 | import argparse
import datetime
import logging
import math
import random
import time
import torch
from os import path as osp
from basicsr.data import create_dataloader, create_dataset
from basicsr.data.data_sampler import EnlargedSampler
from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher
from basicsr.models import create_model
from basicsr.utils import (MessageLogger, check_resume, get_env_info,
get_root_logger, get_time_str, init_tb_logger,
init_wandb_logger, make_exp_dirs, mkdir_and_rename,
set_random_seed)
from basicsr.utils.dist_util import get_dist_info, init_dist
from basicsr.utils.options import dict2str, parse
def create_dataset(dataset_opt):
"""Create dataset.
Args:
        dataset_opt (dict): Configuration for dataset. It contains:
name (str): Dataset name.
type (str): Dataset type.
"""
    dataset_type = dataset_opt['type']
    # dynamic instantiation: _dataset_modules is the list of dataset modules
    # collected at import time in basicsr.data.__init__
    for module in _dataset_modules:
        dataset_cls = getattr(module, dataset_type, None)
        if dataset_cls is not None:
            break
if dataset_cls is None:
raise ValueError(f'Dataset {dataset_type} is not found.')
dataset = dataset_cls(dataset_opt)
logger = get_root_logger()
logger.info(
f'Dataset {dataset.__class__.__name__} - {dataset_opt["name"]} '
'is created.')
return dataset
def create_dataloader(dataset,
dataset_opt,
num_gpu=1,
dist=False,
sampler=None,
seed=None):
"""Create dataloader.
Args:
dataset (torch.utils.data.Dataset): Dataset.
dataset_opt (dict): Dataset options. It contains the following keys:
phase (str): 'train' or 'val'.
num_worker_per_gpu (int): Number of workers for each GPU.
batch_size_per_gpu (int): Training batch size for each GPU.
num_gpu (int): Number of GPUs. Used only in the train phase.
Default: 1.
dist (bool): Whether in distributed training. Used only in the train
phase. Default: False.
sampler (torch.utils.data.sampler): Data sampler. Default: None.
seed (int | None): Seed. Default: None
"""
phase = dataset_opt['phase']
rank, _ = get_dist_info()
if phase == 'train':
if dist: # distributed training
batch_size = dataset_opt['batch_size_per_gpu']
num_workers = dataset_opt['num_worker_per_gpu']
else: # non-distributed training
multiplier = 1 if num_gpu == 0 else num_gpu
batch_size = dataset_opt['batch_size_per_gpu'] * multiplier
num_workers = dataset_opt['num_worker_per_gpu'] * multiplier
dataloader_args = dict(
dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
sampler=sampler,
drop_last=True,
persistent_workers=True
)
        if sampler is None:
            dataloader_args['shuffle'] = True
        # `partial` comes from functools; worker_init_fn (defined alongside
        # this function) seeds random/numpy per worker for reproducibility
        dataloader_args['worker_init_fn'] = partial(
            worker_init_fn, num_workers=num_workers, rank=rank,
            seed=seed) if seed is not None else None
elif phase in ['val', 'test']: # validation
dataloader_args = dict(
dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
else:
raise ValueError(f'Wrong dataset phase: {phase}. '
"Supported ones are 'train', 'val' and 'test'.")
dataloader_args['pin_memory'] = dataset_opt.get('pin_memory', False)
prefetch_mode = dataset_opt.get('prefetch_mode')
if prefetch_mode == 'cpu': # CPUPrefetcher
num_prefetch_queue = dataset_opt.get('num_prefetch_queue', 1)
logger = get_root_logger()
logger.info(f'Use {prefetch_mode} prefetch dataloader: '
f'num_prefetch_queue = {num_prefetch_queue}')
        # PrefetchDataLoader lives in basicsr.data.prefetch_dataloader
        return PrefetchDataLoader(
            num_prefetch_queue=num_prefetch_queue, **dataloader_args)
else:
# prefetch_mode=None: Normal dataloader
# prefetch_mode='cuda': dataloader for CUDAPrefetcher
return torch.utils.data.DataLoader(**dataloader_args)
from torch.utils.data.sampler import Sampler
class EnlargedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
Modified from torch.utils.data.distributed.DistributedSampler
    Support enlarging the dataset for iteration-based training, which saves
    time when restarting the dataloader after each epoch.
Args:
dataset (torch.utils.data.Dataset): Dataset used for sampling.
num_replicas (int | None): Number of processes participating in
the training. It is usually the world_size.
rank (int | None): Rank of the current process within num_replicas.
ratio (int): Enlarging ratio. Default: 1.
"""
def __init__(self, dataset, num_replicas, rank, ratio=1):
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = math.ceil(
len(self.dataset) * ratio / self.num_replicas)
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(self.total_size, generator=g).tolist()
dataset_size = len(self.dataset)
indices = [v % dataset_size for v in indices]
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
def create_train_val_dataloader(opt, logger):
# create train and val dataloaders
train_loader, val_loader = None, None
for phase, dataset_opt in opt['datasets'].items():
if phase == 'train':
dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1)
train_set = create_dataset(dataset_opt)
train_sampler = EnlargedSampler(train_set, opt['world_size'],
opt['rank'], dataset_enlarge_ratio)
train_loader = create_dataloader(
train_set,
dataset_opt,
num_gpu=opt['num_gpu'],
dist=opt['dist'],
sampler=train_sampler,
seed=opt['manual_seed'])
num_iter_per_epoch = math.ceil(
len(train_set) * dataset_enlarge_ratio /
(dataset_opt['batch_size_per_gpu'] * opt['world_size']))
total_iters = int(opt['train']['total_iter'])
total_epochs = math.ceil(total_iters / (num_iter_per_epoch))
logger.info(
'Training statistics:'
f'\n\tNumber of train images: {len(train_set)}'
f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}'
f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}'
f'\n\tWorld size (gpu number): {opt["world_size"]}'
f'\n\tRequire iter number per epoch: {num_iter_per_epoch}'
f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.')
elif phase == 'val':
val_set = create_dataset(dataset_opt)
val_loader = create_dataloader(
val_set,
dataset_opt,
num_gpu=opt['num_gpu'],
dist=opt['dist'],
sampler=None,
seed=opt['manual_seed'])
logger.info(
f'Number of val images/folders in {dataset_opt["name"]}: '
f'{len(val_set)}')
else:
raise ValueError(f'Dataset phase {phase} is not recognized.')
return train_loader, train_sampler, val_loader, total_epochs, total_iters | null |
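A small sketch of how `EnlargedSampler` partitions indices, using toy numbers (a 10-item dataset, 2 replicas, enlarge ratio 2):

```python
import torch
from torch.utils.data import Dataset


class ToyDataset(Dataset):
    def __len__(self):
        return 10

    def __getitem__(self, idx):
        return idx


sampler = EnlargedSampler(ToyDataset(), num_replicas=2, rank=0, ratio=2)
sampler.set_epoch(0)
print(len(sampler))                # 10, i.e. ceil(10 * 2 / 2)
print(sorted(set(iter(sampler))))  # all indices fall in [0, 10)
```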
3,876 | import cv2
import math
import numpy as np
from scipy.ndimage import convolve  # scipy.ndimage.filters is deprecated
from scipy.special import gamma
from basicsr.metrics.metric_util import reorder_image, to_y_channel
def niqe(img,
mu_pris_param,
cov_pris_param,
gaussian_window,
block_size_h=96,
block_size_w=96):
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Ref: Making a "Completely Blind" Image Quality Analyzer.
This implementation could produce almost the same results as the official
MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
Note that we do not include block overlap height and width, since they are
always 0 in the official implementation.
    For good performance, the official implementation advises dividing the
    distorted image into patches of the same size as those used for the
    construction of the multivariate Gaussian model.
Args:
img (ndarray): Input image whose quality needs to be computed. The
image must be a gray or Y (of YCbCr) image with shape (h, w).
Range [0, 255] with float type.
mu_pris_param (ndarray): Mean of a pre-defined multivariate Gaussian
model calculated on the pristine dataset.
cov_pris_param (ndarray): Covariance of a pre-defined multivariate
Gaussian model calculated on the pristine dataset.
gaussian_window (ndarray): A 7x7 Gaussian window used for smoothing the
image.
        block_size_h (int): Height of the blocks into which the image is
            divided. Default: 96 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is
            divided. Default: 96 (the official recommended value).
"""
assert img.ndim == 2, (
'Input image must be a gray or Y (of YCbCr) image with shape (h, w).')
# crop image
h, w = img.shape
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
img = img[0:num_block_h * block_size_h, 0:num_block_w * block_size_w]
distparam = [] # dist param is actually the multiscale features
for scale in (1, 2): # perform on two scales (1, 2)
mu = convolve(img, gaussian_window, mode='nearest')
sigma = np.sqrt(
np.abs(
convolve(np.square(img), gaussian_window, mode='nearest') -
np.square(mu)))
        # normalize, as in Eq. 1 in the paper
        img_normalized = (img - mu) / (sigma + 1)
        feat = []
        for idx_w in range(num_block_w):
            for idx_h in range(num_block_h):
                # process each block
                block = img_normalized[idx_h * block_size_h //
                                       scale:(idx_h + 1) * block_size_h //
                                       scale, idx_w * block_size_w //
                                       scale:(idx_w + 1) * block_size_w //
                                       scale]
                # compute_feature (AGGD feature fitting) is defined alongside
                # this function in basicsr.metrics.niqe
                feat.append(compute_feature(block))
distparam.append(np.array(feat))
# TODO: matlab bicubic downsample with anti-aliasing
# for simplicity, now we use opencv instead, which will result in
# a slight difference.
if scale == 1:
h, w = img.shape
img = cv2.resize(
img / 255., (w // 2, h // 2), interpolation=cv2.INTER_LINEAR)
img = img * 255.
distparam = np.concatenate(distparam, axis=1)
# fit a MVG (multivariate Gaussian) model to distorted patch features
mu_distparam = np.nanmean(distparam, axis=0)
# use nancov. ref: https://ww2.mathworks.cn/help/stats/nancov.html
distparam_no_nan = distparam[~np.isnan(distparam).any(axis=1)]
cov_distparam = np.cov(distparam_no_nan, rowvar=False)
# compute niqe quality, Eq. 10 in the paper
invcov_param = np.linalg.pinv((cov_pris_param + cov_distparam) / 2)
quality = np.matmul(
np.matmul((mu_pris_param - mu_distparam), invcov_param),
np.transpose((mu_pris_param - mu_distparam)))
quality = np.sqrt(quality)
return quality
def reorder_image(img, input_order='HWC'):
"""Reorder images to 'HWC' order.
If the input_order is (h, w), return (h, w, 1);
If the input_order is (c, h, w), return (h, w, c);
If the input_order is (h, w, c), return as it is.
Args:
img (ndarray): Input image.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
If the input image shape is (h, w), input_order will not have
effects. Default: 'HWC'.
Returns:
ndarray: reordered image.
"""
if input_order not in ['HWC', 'CHW']:
raise ValueError(
f'Wrong input_order {input_order}. Supported input_orders are '
"'HWC' and 'CHW'")
if len(img.shape) == 2:
img = img[..., None]
if input_order == 'CHW':
img = img.transpose(1, 2, 0)
return img
def to_y_channel(img):
"""Change to Y channel of YCbCr.
Args:
img (ndarray): Images with range [0, 255].
Returns:
(ndarray): Images with range [0, 255] (float type) without round.
"""
    img = img.astype(np.float32) / 255.
    if img.ndim == 3 and img.shape[2] == 3:
        # bgr2ycbcr comes from basicsr.utils.matlab_functions
        img = bgr2ycbcr(img, y_only=True)
        img = img[..., None]
    return img * 255.
The provided code snippet includes necessary dependencies for implementing the `calculate_niqe` function. Write a Python function `def calculate_niqe(img, crop_border, input_order='HWC', convert_to='y')` to solve the following problem:
Calculate NIQE (Natural Image Quality Evaluator) metric. Ref: Making a "Completely Blind" Image Quality Analyzer. This implementation could produce almost the same results as the official MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip We use the official params estimated from the pristine dataset. We use the recommended block size (96, 96) without overlaps. Args: img (ndarray): Input image whose quality needs to be computed. The input image must be in range [0, 255] with float/int type. The input_order of image can be 'HW' or 'HWC' or 'CHW'. (BGR order) If the input order is 'HWC' or 'CHW', it will be converted to gray or Y (of YCbCr) image according to the ``convert_to`` argument. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the metric calculation. input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'. Default: 'HWC'. convert_to (str): Whether converted to 'y' (of MATLAB YCbCr) or 'gray'. Default: 'y'. Returns: float: NIQE result.
Here is the function:
def calculate_niqe(img, crop_border, input_order='HWC', convert_to='y'):
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Ref: Making a "Completely Blind" Image Quality Analyzer.
This implementation could produce almost the same results as the official
MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
We use the official params estimated from the pristine dataset.
We use the recommended block size (96, 96) without overlaps.
Args:
img (ndarray): Input image whose quality needs to be computed.
The input image must be in range [0, 255] with float/int type.
The input_order of image can be 'HW' or 'HWC' or 'CHW'. (BGR order)
If the input order is 'HWC' or 'CHW', it will be converted to gray
or Y (of YCbCr) image according to the ``convert_to`` argument.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'.
Default: 'HWC'.
        convert_to (str): Whether converted to 'y' (of MATLAB YCbCr) or 'gray'.
Default: 'y'.
Returns:
float: NIQE result.
"""
# we use the official params estimated from the pristine dataset.
niqe_pris_params = np.load('basicsr/metrics/niqe_pris_params.npz')
mu_pris_param = niqe_pris_params['mu_pris_param']
cov_pris_param = niqe_pris_params['cov_pris_param']
gaussian_window = niqe_pris_params['gaussian_window']
img = img.astype(np.float32)
if input_order != 'HW':
img = reorder_image(img, input_order=input_order)
if convert_to == 'y':
img = to_y_channel(img)
elif convert_to == 'gray':
img = cv2.cvtColor(img / 255., cv2.COLOR_BGR2GRAY) * 255.
img = np.squeeze(img)
if crop_border != 0:
img = img[crop_border:-crop_border, crop_border:-crop_border]
niqe_result = niqe(img, mu_pris_param, cov_pris_param, gaussian_window)
return niqe_result | Calculate NIQE (Natural Image Quality Evaluator) metric. Ref: Making a "Completely Blind" Image Quality Analyzer. This implementation could produce almost the same results as the official MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip We use the official params estimated from the pristine dataset. We use the recommended block size (96, 96) without overlaps. Args: img (ndarray): Input image whose quality needs to be computed. The input image must be in range [0, 255] with float/int type. The input_order of image can be 'HW' or 'HWC' or 'CHW'. (BGR order) If the input order is 'HWC' or 'CHW', it will be converted to gray or Y (of YCbCr) image according to the ``convert_to`` argument. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the metric calculation. input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'. Default: 'HWC'. convert_to (str): Whether coverted to 'y' (of MATLAB YCbCr) or 'gray'. Default: 'y'. Returns: float: NIQE result. |
3,877 | import numpy as np
import torch
import torch.nn as nn
from scipy import linalg
from tqdm import tqdm
from basicsr.models.archs.inception import InceptionV3
def load_patched_inception_v3(device='cuda',
resize_input=True,
normalize_input=False):
# we may not resize the input, but in [rosinality/stylegan2-pytorch] it
# does resize the input.
inception = InceptionV3([3],
resize_input=resize_input,
normalize_input=normalize_input)
inception = nn.DataParallel(inception).eval().to(device)
return inception | null |
3,878 | import numpy as np
import torch
import torch.nn as nn
from scipy import linalg
from tqdm import tqdm
from basicsr.models.archs.inception import InceptionV3
The provided code snippet includes necessary dependencies for implementing the `extract_inception_features` function. Write a Python function `def extract_inception_features(data_generator, inception, len_generator=None, device='cuda')` to solve the following problem:
Extract inception features. Args: data_generator (generator): A data generator. inception (nn.Module): Inception model. len_generator (int): Length of the data_generator to show the progressbar. Default: None. device (str): Device. Default: cuda. Returns: Tensor: Extracted features.
Here is the function:
def extract_inception_features(data_generator,
inception,
len_generator=None,
device='cuda'):
"""Extract inception features.
Args:
data_generator (generator): A data generator.
inception (nn.Module): Inception model.
len_generator (int): Length of the data_generator to show the
progressbar. Default: None.
device (str): Device. Default: cuda.
Returns:
Tensor: Extracted features.
"""
if len_generator is not None:
pbar = tqdm(total=len_generator, unit='batch', desc='Extract')
else:
pbar = None
features = []
for data in data_generator:
if pbar:
pbar.update(1)
data = data.to(device)
feature = inception(data)[0].view(data.shape[0], -1)
features.append(feature.to('cpu'))
if pbar:
pbar.close()
features = torch.cat(features, 0)
return features | Extract inception features. Args: data_generator (generator): A data generator. inception (nn.Module): Inception model. len_generator (int): Length of the data_generator to show the progressbar. Default: None. device (str): Device. Default: cuda. Returns: Tensor: Extracted features. |
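A toy driver for `extract_inception_features`, assuming a CUDA device and downloadable InceptionV3 weights; random tensors stand in for real image batches:

```python
import torch


def toy_generator(num_batches=4, batch_size=8):
    # Stand-in for a real dataloader: RGB batches in [0, 1]
    for _ in range(num_batches):
        yield torch.rand(batch_size, 3, 299, 299)


inception = load_patched_inception_v3(device='cuda')
feats = extract_inception_features(
    toy_generator(), inception, len_generator=4, device='cuda')
print(feats.shape)  # (32, 2048) pool3 features, gathered on CPU
```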
3,879 | import numpy as np
import torch
import torch.nn as nn
from scipy import linalg
from tqdm import tqdm
from basicsr.models.archs.inception import InceptionV3
The provided code snippet includes necessary dependencies for implementing the `calculate_fid` function. Write a Python function `def calculate_fid(mu1, sigma1, mu2, sigma2, eps=1e-6)` to solve the following problem:
Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Args: mu1 (np.array): The sample mean over activations. sigma1 (np.array): The covariance matrix over activations for generated samples. mu2 (np.array): The sample mean over activations, precalculated on a representative data set. sigma2 (np.array): The covariance matrix over activations, precalculated on a representative data set. Returns: float: The Frechet Distance.
Here is the function:
def calculate_fid(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Args:
mu1 (np.array): The sample mean over activations.
sigma1 (np.array): The covariance matrix over activations for
generated samples.
        mu2 (np.array): The sample mean over activations, precalculated on a
            representative data set.
        sigma2 (np.array): The covariance matrix over activations,
            precalculated on a representative data set.
Returns:
float: The Frechet Distance.
"""
assert mu1.shape == mu2.shape, 'Two mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, (
'Two covariances have different dimensions')
cov_sqrt, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
# Product might be almost singular
if not np.isfinite(cov_sqrt).all():
        print(f'Product of cov matrices is singular. Adding {eps} to diagonal '
              'of cov estimates')
offset = np.eye(sigma1.shape[0]) * eps
cov_sqrt = linalg.sqrtm((sigma1 + offset) @ (sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(cov_sqrt):
if not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=1e-3):
m = np.max(np.abs(cov_sqrt.imag))
raise ValueError(f'Imaginary component {m}')
cov_sqrt = cov_sqrt.real
mean_diff = mu1 - mu2
mean_norm = mean_diff @ mean_diff
trace = np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(cov_sqrt)
fid = mean_norm + trace
return fid | Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Args: mu1 (np.array): The sample mean over activations. sigma1 (np.array): The covariance matrix over activations for generated samples. mu2 (np.array): The sample mean over activations, precalculated on an representative data set. sigma2 (np.array): The covariance matrix over activations, precalculated on an representative data set. Returns: float: The Frechet Distance. |
3,880 | import cv2
import numpy as np
from basicsr.metrics.metric_util import reorder_image, to_y_channel
from skimage.metrics import structural_similarity
import torch
def calculate_psnr(img1,
                   img2,
                   crop_border,
                   input_order='HWC',
                   test_y_channel=False):
    # The body was missing here; a minimal sketch of the standard PSNR
    # computation for images in [0, 255] (the full basicsr version also
    # accepts torch tensors).
    img1 = reorder_image(img1, input_order=input_order).astype(np.float64)
    img2 = reorder_image(img2, input_order=input_order).astype(np.float64)
    if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
    if test_y_channel:
        img1 = to_y_channel(img1)
        img2 = to_y_channel(img2)
    mse = np.mean((img1 - img2)**2)
    if mse == 0:
        return float('inf')
    return 20. * np.log10(255. / np.sqrt(mse))
def calculate_psnr_left(img1,
img2,
crop_border,
input_order='HWC',
test_y_channel=False):
assert input_order == 'HWC'
assert crop_border == 0
img1 = img1[:,64:,:3]
img2 = img2[:,64:,:3]
return calculate_psnr(img1=img1, img2=img2, crop_border=0, input_order=input_order, test_y_channel=test_y_channel) | null |
3,881 | import cv2
import numpy as np
from basicsr.metrics.metric_util import reorder_image, to_y_channel
from skimage.metrics import structural_similarity
import torch
def prepare_for_ssim(img, k):
import torch
with torch.no_grad():
img = torch.from_numpy(img).unsqueeze(0).unsqueeze(0).float()
conv = torch.nn.Conv2d(1, 1, k, stride=1, padding=k//2, padding_mode='reflect')
conv.weight.requires_grad = False
conv.weight[:, :, :, :] = 1. / (k * k)
img = conv(img)
img = img.squeeze(0).squeeze(0)
img = img[0::k, 0::k]
return img.detach().cpu().numpy() | null |
3,882 | import cv2
import numpy as np
from basicsr.metrics.metric_util import reorder_image, to_y_channel
from skimage.metrics import structural_similarity
import torch
def prepare_for_ssim_rgb(img, k):
import torch
with torch.no_grad():
img = torch.from_numpy(img).float() #HxWx3
conv = torch.nn.Conv2d(1, 1, k, stride=1, padding=k // 2, padding_mode='reflect')
conv.weight.requires_grad = False
conv.weight[:, :, :, :] = 1. / (k * k)
new_img = []
for i in range(3):
new_img.append(conv(img[:, :, i].unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0)[0::k, 0::k])
return torch.stack(new_img, dim=2).detach().cpu().numpy() | null |
3,883 | import cv2
import numpy as np
from basicsr.metrics.metric_util import reorder_image, to_y_channel
from skimage.metrics import structural_similarity
import torch
def calculate_ssim(img1,
img2,
crop_border,
input_order='HWC',
test_y_channel=False,
ssim3d=True):
"""Calculate SSIM (structural similarity).
Ref:
Image quality assessment: From error visibility to structural similarity
The results are the same as that of the official released MATLAB code in
https://ece.uwaterloo.ca/~z70wang/research/ssim/.
For three-channel images, SSIM is calculated for each channel and then
averaged.
Args:
img1 (ndarray): Images with range [0, 255].
img2 (ndarray): Images with range [0, 255].
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the SSIM calculation.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
Default: 'HWC'.
test_y_channel (bool): Test on Y channel of YCbCr. Default: False.
Returns:
float: ssim result.
"""
assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')
if input_order not in ['HWC', 'CHW']:
raise ValueError(
f'Wrong input_order {input_order}. Supported input_orders are '
'"HWC" and "CHW"')
if type(img1) == torch.Tensor:
if len(img1.shape) == 4:
img1 = img1.squeeze(0)
img1 = img1.detach().cpu().numpy().transpose(1,2,0)
if type(img2) == torch.Tensor:
if len(img2.shape) == 4:
img2 = img2.squeeze(0)
img2 = img2.detach().cpu().numpy().transpose(1,2,0)
img1 = reorder_image(img1, input_order=input_order)
img2 = reorder_image(img2, input_order=input_order)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
if crop_border != 0:
img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
    def _cal_ssim(img1, img2):
        if test_y_channel:
            img1 = to_y_channel(img1)
            img2 = to_y_channel(img2)
            # _ssim_cly matches the MATLAB Y-channel SSIM; it is defined
            # alongside this function in basicsr.metrics.psnr_ssim
            return _ssim_cly(img1[..., 0], img2[..., 0])
        ssims = []
        max_value = 1 if img1.max() <= 1 else 255
        with torch.no_grad():
            # _ssim and _ssim_3d are likewise defined in the same module
            final_ssim = _ssim_3d(img1, img2, max_value) if ssim3d else _ssim(
                img1, img2, max_value)
            ssims.append(final_ssim)
        return np.array(ssims).mean()
if img1.ndim == 3 and img1.shape[2] == 6:
l1, r1 = img1[:,:,:3], img1[:,:,3:]
l2, r2 = img2[:,:,:3], img2[:,:,3:]
return (_cal_ssim(l1, l2) + _cal_ssim(r1, r2))/2
else:
return _cal_ssim(img1, img2)
def calculate_ssim_left(img1,
img2,
crop_border,
input_order='HWC',
test_y_channel=False,
ssim3d=True):
assert input_order == 'HWC'
assert crop_border == 0
img1 = img1[:,64:,:3]
img2 = img2[:,64:,:3]
return calculate_ssim(img1=img1, img2=img2, crop_border=0, input_order=input_order, test_y_channel=test_y_channel, ssim3d=ssim3d) | null |
3,884 | import cv2
import numpy as np
from basicsr.metrics.metric_util import reorder_image, to_y_channel
from skimage.metrics import structural_similarity
import torch
def calculate_skimage_ssim(img1, img2):
    # NOTE: `multichannel` is deprecated in recent scikit-image releases;
    # newer versions use `channel_axis=-1` instead.
    return structural_similarity(img1, img2, multichannel=True)
def calculate_skimage_ssim_left(img1, img2):
img1 = img1[:,64:,:3]
img2 = img2[:,64:,:3]
return calculate_skimage_ssim(img1=img1, img2=img2) | null |
3,885 | import cv2
import random
from cv2 import rotate
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `paired_random_crop` function. Write a Python function `def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path)` to solve the following problem:
Paired random crop. It crops lists of lq and gt images with corresponding locations. Args: img_gts (list[ndarray] | ndarray): GT images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. img_lqs (list[ndarray] | ndarray): LQ images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. gt_patch_size (int): GT patch size. scale (int): Scale factor. gt_path (str): Path to ground-truth. Returns: list[ndarray] | ndarray: GT images and LQ images. If returned results only have one element, just return ndarray.
Here is the function:
def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path):
"""Paired random crop.
It crops lists of lq and gt images with corresponding locations.
Args:
img_gts (list[ndarray] | ndarray): GT images. Note that all images
should have the same shape. If the input is an ndarray, it will
be transformed to a list containing itself.
img_lqs (list[ndarray] | ndarray): LQ images. Note that all images
should have the same shape. If the input is an ndarray, it will
be transformed to a list containing itself.
gt_patch_size (int): GT patch size.
scale (int): Scale factor.
gt_path (str): Path to ground-truth.
Returns:
list[ndarray] | ndarray: GT images and LQ images. If returned results
only have one element, just return ndarray.
"""
if not isinstance(img_gts, list):
img_gts = [img_gts]
if not isinstance(img_lqs, list):
img_lqs = [img_lqs]
h_lq, w_lq, _ = img_lqs[0].shape
h_gt, w_gt, _ = img_gts[0].shape
lq_patch_size = gt_patch_size // scale
if h_gt != h_lq * scale or w_gt != w_lq * scale:
raise ValueError(
f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',
f'multiplication of LQ ({h_lq}, {w_lq}).')
if h_lq < lq_patch_size or w_lq < lq_patch_size:
raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
f'({lq_patch_size}, {lq_patch_size}). '
f'Please remove {gt_path}.')
# randomly choose top and left coordinates for lq patch
top = random.randint(0, h_lq - lq_patch_size)
left = random.randint(0, w_lq - lq_patch_size)
# crop lq patch
img_lqs = [
v[top:top + lq_patch_size, left:left + lq_patch_size, ...]
for v in img_lqs
]
# crop corresponding gt patch
top_gt, left_gt = int(top * scale), int(left * scale)
img_gts = [
v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...]
for v in img_gts
]
if len(img_gts) == 1:
img_gts = img_gts[0]
if len(img_lqs) == 1:
img_lqs = img_lqs[0]
return img_gts, img_lqs | Paired random crop. It crops lists of lq and gt images with corresponding locations. Args: img_gts (list[ndarray] | ndarray): GT images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. img_lqs (list[ndarray] | ndarray): LQ images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. gt_patch_size (int): GT patch size. scale (int): Scale factor. gt_path (str): Path to ground-truth. Returns: list[ndarray] | ndarray: GT images and LQ images. If returned results only have one element, just return ndarray. |
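A usage sketch with random arrays (the `gt_path` argument is only used in error messages):

```python
import numpy as np

scale = 4
img_gt = np.random.rand(256, 256, 3)  # GT must be scale x the LQ size
img_lq = np.random.rand(64, 64, 3)
gt_patch, lq_patch = paired_random_crop(
    img_gt, img_lq, gt_patch_size=128, scale=scale, gt_path='toy.png')
print(gt_patch.shape, lq_patch.shape)  # (128, 128, 3) (32, 32, 3)
```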
3,886 | import cv2
import random
from cv2 import rotate
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `paired_random_crop_hw` function. Write a Python function `def paired_random_crop_hw(img_gts, img_lqs, gt_patch_size_h, gt_patch_size_w, scale, gt_path)` to solve the following problem:
Paired random crop. It crops lists of lq and gt images with corresponding locations. Args: img_gts (list[ndarray] | ndarray): GT images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. img_lqs (list[ndarray] | ndarray): LQ images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. gt_patch_size (int): GT patch size. scale (int): Scale factor. gt_path (str): Path to ground-truth. Returns: list[ndarray] | ndarray: GT images and LQ images. If returned results only have one element, just return ndarray.
Here is the function:
def paired_random_crop_hw(img_gts, img_lqs, gt_patch_size_h, gt_patch_size_w, scale, gt_path):
"""Paired random crop.
It crops lists of lq and gt images with corresponding locations.
Args:
img_gts (list[ndarray] | ndarray): GT images. Note that all images
should have the same shape. If the input is an ndarray, it will
be transformed to a list containing itself.
img_lqs (list[ndarray] | ndarray): LQ images. Note that all images
should have the same shape. If the input is an ndarray, it will
be transformed to a list containing itself.
gt_patch_size (int): GT patch size.
scale (int): Scale factor.
gt_path (str): Path to ground-truth.
Returns:
list[ndarray] | ndarray: GT images and LQ images. If returned results
only have one element, just return ndarray.
"""
if not isinstance(img_gts, list):
img_gts = [img_gts]
if not isinstance(img_lqs, list):
img_lqs = [img_lqs]
h_lq, w_lq, _ = img_lqs[0].shape
h_gt, w_gt, _ = img_gts[0].shape
lq_patch_size_h = gt_patch_size_h // scale
lq_patch_size_w = gt_patch_size_w // scale
# if h_gt != h_lq * scale or w_gt != w_lq * scale:
# raise ValueError(
# f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',
# f'multiplication of LQ ({h_lq}, {w_lq}).')
# if h_lq < lq_patch_size or w_lq < lq_patch_size:
# raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
# f'({lq_patch_size}, {lq_patch_size}). '
# f'Please remove {gt_path}.')
# randomly choose top and left coordinates for lq patch
top = random.randint(0, h_lq - lq_patch_size_h)
left = random.randint(0, w_lq - lq_patch_size_w)
# crop lq patch
img_lqs = [
v[top:top + lq_patch_size_h, left:left + lq_patch_size_w, ...]
for v in img_lqs
]
# crop corresponding gt patch
top_gt, left_gt = int(top * scale), int(left * scale)
img_gts = [
v[top_gt:top_gt + gt_patch_size_h, left_gt:left_gt + gt_patch_size_w, ...]
for v in img_gts
]
if len(img_gts) == 1:
img_gts = img_gts[0]
if len(img_lqs) == 1:
img_lqs = img_lqs[0]
return img_gts, img_lqs | Paired random crop. It crops lists of lq and gt images with corresponding locations. Args: img_gts (list[ndarray] | ndarray): GT images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. img_lqs (list[ndarray] | ndarray): LQ images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. gt_patch_size (int): GT patch size. scale (int): Scale factor. gt_path (str): Path to ground-truth. Returns: list[ndarray] | ndarray: GT images and LQ images. If returned results only have one element, just return ndarray. |
3,887 | import cv2
import random
from cv2 import rotate
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `augment` function. Write a Python function `def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False, vflip=False)` to solve the following problem:
Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees). We use vertical flip and transpose for rotation implementation. All the images in the list use the same augmentation. Args: imgs (list[ndarray] | ndarray): Images to be augmented. If the input is an ndarray, it will be transformed to a list. hflip (bool): Horizontal flip. Default: True. rotation (bool): Rotation. Default: True. flows (list[ndarray]): Flows to be augmented. If the input is an ndarray, it will be transformed to a list. Dimension is (h, w, 2). Default: None. return_status (bool): Return the status of flip and rotation. Default: False. Returns: list[ndarray] | ndarray: Augmented images and flows. If returned results only have one element, just return ndarray.
Here is the function:
def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False, vflip=False):
"""Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees).
We use vertical flip and transpose for rotation implementation.
All the images in the list use the same augmentation.
Args:
imgs (list[ndarray] | ndarray): Images to be augmented. If the input
is an ndarray, it will be transformed to a list.
hflip (bool): Horizontal flip. Default: True.
        rotation (bool): Rotation. Default: True.
        flows (list[ndarray]): Flows to be augmented. If the input is an
            ndarray, it will be transformed to a list.
            Dimension is (h, w, 2). Default: None.
return_status (bool): Return the status of flip and rotation.
Default: False.
Returns:
list[ndarray] | ndarray: Augmented images and flows. If returned
results only have one element, just return ndarray.
"""
    hflip = hflip and random.random() < 0.5
    # NOTE: when either vflip or rotation is requested, vflip is re-drawn at
    # random here, so the vflip argument acts as an enable flag rather than
    # a forced flip
    if vflip or rotation:
        vflip = random.random() < 0.5
    rot90 = rotation and random.random() < 0.5
def _augment(img):
if hflip: # horizontal
cv2.flip(img, 1, img)
if img.shape[2] == 6:
img = img[:,:,[3,4,5,0,1,2]].copy() # swap left/right
if vflip: # vertical
cv2.flip(img, 0, img)
if rot90:
img = img.transpose(1, 0, 2)
return img
def _augment_flow(flow):
if hflip: # horizontal
cv2.flip(flow, 1, flow)
flow[:, :, 0] *= -1
if vflip: # vertical
cv2.flip(flow, 0, flow)
flow[:, :, 1] *= -1
if rot90:
flow = flow.transpose(1, 0, 2)
flow = flow[:, :, [1, 0]]
return flow
if not isinstance(imgs, list):
imgs = [imgs]
imgs = [_augment(img) for img in imgs]
if len(imgs) == 1:
imgs = imgs[0]
if flows is not None:
if not isinstance(flows, list):
flows = [flows]
flows = [_augment_flow(flow) for flow in flows]
if len(flows) == 1:
flows = flows[0]
return imgs, flows
else:
if return_status:
return imgs, (hflip, vflip, rot90)
else:
            return imgs | Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees). We use vertical flip and transpose for rotation implementation. All the images in the list use the same augmentation. Args: imgs (list[ndarray] | ndarray): Images to be augmented. If the input is an ndarray, it will be transformed to a list. hflip (bool): Horizontal flip. Default: True. rotation (bool): Rotation. Default: True. flows (list[ndarray]): Flows to be augmented. If the input is an ndarray, it will be transformed to a list. Dimension is (h, w, 2). Default: None. return_status (bool): Return the status of flip and rotation. Default: False. Returns: list[ndarray] | ndarray: Augmented images and flows. If returned results only have one element, just return ndarray. |
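A usage sketch with random arrays; for square inputs the shapes are unchanged even when `rot90` triggers:

```python
import numpy as np

imgs = [np.random.rand(64, 64, 3).astype(np.float32) for _ in range(2)]
flow = np.random.rand(64, 64, 2).astype(np.float32)
out_imgs, out_flow = augment(imgs, hflip=True, rotation=True, flows=flow)
print(out_imgs[0].shape, out_flow.shape)  # (64, 64, 3) (64, 64, 2)
```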
3,888 | import cv2
import random
from cv2 import rotate
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `img_rotate` function. Write a Python function `def img_rotate(img, angle, center=None, scale=1.0)` to solve the following problem:
Rotate image. Args: img (ndarray): Image to be rotated. angle (float): Rotation angle in degrees. Positive values mean counter-clockwise rotation. center (tuple[int]): Rotation center. If the center is None, initialize it as the center of the image. Default: None. scale (float): Isotropic scale factor. Default: 1.0.
Here is the function:
def img_rotate(img, angle, center=None, scale=1.0):
"""Rotate image.
Args:
img (ndarray): Image to be rotated.
angle (float): Rotation angle in degrees. Positive values mean
counter-clockwise rotation.
center (tuple[int]): Rotation center. If the center is None,
initialize it as the center of the image. Default: None.
scale (float): Isotropic scale factor. Default: 1.0.
"""
(h, w) = img.shape[:2]
if center is None:
center = (w // 2, h // 2)
matrix = cv2.getRotationMatrix2D(center, angle, scale)
rotated_img = cv2.warpAffine(img, matrix, (w, h))
return rotated_img | Rotate image. Args: img (ndarray): Image to be rotated. angle (float): Rotation angle in degrees. Positive values mean counter-clockwise rotation. center (tuple[int]): Rotation center. If the center is None, initialize it as the center of the image. Default: None. scale (float): Isotropic scale factor. Default: 1.0. |
3,889 | import cv2
import numpy as np
import torch
from os import path as osp
from torch.nn import functional as F
from basicsr.data.transforms import mod_crop
from basicsr.utils import img2tensor, scandir
def mod_crop(img, scale):
"""Mod crop images, used during testing.
Args:
img (ndarray): Input image.
scale (int): Scale factor.
Returns:
ndarray: Result image.
"""
img = img.copy()
if img.ndim in (2, 3):
h, w = img.shape[0], img.shape[1]
h_remainder, w_remainder = h % scale, w % scale
img = img[:h - h_remainder, :w - w_remainder, ...]
else:
raise ValueError(f'Wrong img ndim: {img.ndim}.')
return img
The provided code snippet includes necessary dependencies for implementing the `read_img_seq` function. Write a Python function `def read_img_seq(path, require_mod_crop=False, scale=1)` to solve the following problem:
Read a sequence of images from a given folder path. Args: path (list[str] | str): List of image paths or image folder path. require_mod_crop (bool): Require mod crop for each image. Default: False. scale (int): Scale factor for mod_crop. Default: 1. Returns: Tensor: size (t, c, h, w), RGB, [0, 1].
Here is the function:
def read_img_seq(path, require_mod_crop=False, scale=1):
"""Read a sequence of images from a given folder path.
Args:
path (list[str] | str): List of image paths or image folder path.
require_mod_crop (bool): Require mod crop for each image.
Default: False.
scale (int): Scale factor for mod_crop. Default: 1.
Returns:
Tensor: size (t, c, h, w), RGB, [0, 1].
"""
if isinstance(path, list):
img_paths = path
else:
img_paths = sorted(list(scandir(path, full_path=True)))
imgs = [cv2.imread(v).astype(np.float32) / 255. for v in img_paths]
if require_mod_crop:
imgs = [mod_crop(img, scale) for img in imgs]
imgs = img2tensor(imgs, bgr2rgb=True, float32=True)
imgs = torch.stack(imgs, dim=0)
return imgs | Read a sequence of images from a given folder path. Args: path (list[str] | str): List of image paths or image folder path. require_mod_crop (bool): Require mod crop for each image. Default: False. scale (int): Scale factor for mod_crop. Default: 1. Returns: Tensor: size (t, c, h, w), RGB, [0, 1]. |
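A usage sketch; the folder path is a placeholder for a directory of ordered frames:

```python
frames = read_img_seq('datasets/demo_seq', require_mod_crop=True, scale=4)
print(frames.shape)  # (t, 3, h, w), RGB, values in [0, 1]
```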
3,891 | import cv2
import numpy as np
import torch
from os import path as osp
from torch.nn import functional as F
from basicsr.data.transforms import mod_crop
from basicsr.utils import img2tensor, scandir
The provided code snippet includes necessary dependencies for implementing the `paired_paths_from_lmdb` function. Write a Python function `def paired_paths_from_lmdb(folders, keys)` to solve the following problem:
Generate paired paths from lmdb files. Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is: lq.lmdb ├── data.mdb ├── lock.mdb ├── meta_info.txt The data.mdb and lock.mdb are standard lmdb files and you can refer to https://lmdb.readthedocs.io/en/release/ for more details. The meta_info.txt is a specified txt file to record the meta information of our datasets. It will be automatically created when preparing datasets by our provided dataset tools. Each line in the txt file records 1)image name (with extension), 2)image shape, 3)compression level, separated by a white space. Example: `baboon.png (120,125,3) 1` We use the image name without extension as the lmdb key. Note that we use the same key for the corresponding lq and gt images. Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be consistent with folders, e.g., ['lq', 'gt']. Note that this key is different from lmdb keys. Returns: list[str]: Returned path list.
Here is the function:
def paired_paths_from_lmdb(folders, keys):
"""Generate paired paths from lmdb files.
Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is:
lq.lmdb
├── data.mdb
├── lock.mdb
├── meta_info.txt
The data.mdb and lock.mdb are standard lmdb files and you can refer to
https://lmdb.readthedocs.io/en/release/ for more details.
The meta_info.txt is a specified txt file to record the meta information
of our datasets. It will be automatically created when preparing
datasets by our provided dataset tools.
Each line in the txt file records
1)image name (with extension),
2)image shape,
3)compression level, separated by a white space.
Example: `baboon.png (120,125,3) 1`
We use the image name without extension as the lmdb key.
Note that we use the same key for the corresponding lq and gt images.
Args:
folders (list[str]): A list of folder path. The order of list should
be [input_folder, gt_folder].
        keys (list[str]): A list of keys identifying folders. The order
            should be consistent with folders, e.g., ['lq', 'gt'].
Note that this key is different from lmdb keys.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
'The len of folders should be 2 with [input_folder, gt_folder]. '
f'But got {len(folders)}')
assert len(keys) == 2, (
'The len of keys should be 2 with [input_key, gt_key]. '
f'But got {len(keys)}')
input_folder, gt_folder = folders
input_key, gt_key = keys
if not (input_folder.endswith('.lmdb') and gt_folder.endswith('.lmdb')):
raise ValueError(
f'{input_key} folder and {gt_key} folder should both in lmdb '
f'formats. But received {input_key}: {input_folder}; '
f'{gt_key}: {gt_folder}')
# ensure that the two meta_info files are the same
with open(osp.join(input_folder, 'meta_info.txt')) as fin:
input_lmdb_keys = [line.split('.')[0] for line in fin]
with open(osp.join(gt_folder, 'meta_info.txt')) as fin:
gt_lmdb_keys = [line.split('.')[0] for line in fin]
if set(input_lmdb_keys) != set(gt_lmdb_keys):
raise ValueError(
f'Keys in {input_key}_folder and {gt_key}_folder are different.')
else:
paths = []
for lmdb_key in sorted(input_lmdb_keys):
paths.append(
dict([(f'{input_key}_path', lmdb_key),
(f'{gt_key}_path', lmdb_key)]))
    return paths | Generate paired paths from lmdb files. Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is: lq.lmdb ├── data.mdb ├── lock.mdb ├── meta_info.txt The data.mdb and lock.mdb are standard lmdb files and you can refer to https://lmdb.readthedocs.io/en/release/ for more details. The meta_info.txt is a specified txt file to record the meta information of our datasets. It will be automatically created when preparing datasets by our provided dataset tools. Each line in the txt file records 1)image name (with extension), 2)image shape, 3)compression level, separated by a white space. Example: `baboon.png (120,125,3) 1` We use the image name without extension as the lmdb key. Note that we use the same key for the corresponding lq and gt images. Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be consistent with folders, e.g., ['lq', 'gt']. Note that this key is different from lmdb keys. Returns: list[str]: Returned path list. |
3,892 | import cv2
import numpy as np
import torch
from os import path as osp
from torch.nn import functional as F
from basicsr.data.transforms import mod_crop
from basicsr.utils import img2tensor, scandir
The provided code snippet includes necessary dependencies for implementing the `paired_paths_from_meta_info_file` function. Write a Python function `def paired_paths_from_meta_info_file(folders, keys, meta_info_file, filename_tmpl)` to solve the following problem:
Generate paired paths from a meta information file. Each line in the meta information file contains the image names and image shape (usually for gt), separated by a white space. Example of a meta information file: ``` 0001_s001.png (480,480,3) 0001_s002.png (480,480,3) ``` Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be consistent with folders, e.g., ['lq', 'gt']. meta_info_file (str): Path to the meta information file. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list.
Here is the function:
def paired_paths_from_meta_info_file(folders, keys, meta_info_file,
filename_tmpl):
"""Generate paired paths from an meta information file.
Each line in the meta information file contains the image names and
image shape (usually for gt), separated by a white space.
    Example of a meta information file:
```
0001_s001.png (480,480,3)
0001_s002.png (480,480,3)
```
Args:
folders (list[str]): A list of folder path. The order of list should
be [input_folder, gt_folder].
        keys (list[str]): A list of keys identifying folders. The order
            should be consistent with folders, e.g., ['lq', 'gt'].
meta_info_file (str): Path to the meta information file.
filename_tmpl (str): Template for each filename. Note that the
template excludes the file extension. Usually the filename_tmpl is
for files in the input folder.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
'The len of folders should be 2 with [input_folder, gt_folder]. '
f'But got {len(folders)}')
assert len(keys) == 2, (
'The len of keys should be 2 with [input_key, gt_key]. '
f'But got {len(keys)}')
input_folder, gt_folder = folders
input_key, gt_key = keys
with open(meta_info_file, 'r') as fin:
gt_names = [line.split(' ')[0] for line in fin]
paths = []
for gt_name in gt_names:
basename, ext = osp.splitext(osp.basename(gt_name))
input_name = f'{filename_tmpl.format(basename)}{ext}'
input_path = osp.join(input_folder, input_name)
gt_path = osp.join(gt_folder, gt_name)
paths.append(
dict([(f'{input_key}_path', input_path),
(f'{gt_key}_path', gt_path)]))
return paths | Generate paired paths from a meta information file. Each line in the meta information file contains the image names and image shape (usually for gt), separated by a white space. Example of a meta information file: ``` 0001_s001.png (480,480,3) 0001_s002.png (480,480,3) ``` Args: folders (list[str]): A list of folder paths. The order of the list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be consistent with folders, e.g., ['lq', 'gt']. meta_info_file (str): Path to the meta information file. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list.
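A minimal usage sketch of `paired_paths_from_meta_info_file` (the folder layout, meta file path, and image names below are hypothetical):
folders = ['datasets/demo/lq', 'datasets/demo/gt']
paths = paired_paths_from_meta_info_file(
    folders, ['lq', 'gt'], 'datasets/demo/meta_info.txt', filename_tmpl='{}')
# each entry is a dict such as:
# {'lq_path': 'datasets/demo/lq/0001_s001.png',
#  'gt_path': 'datasets/demo/gt/0001_s001.png'}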
3,893 | import cv2
import numpy as np
import torch
from os import path as osp
from torch.nn import functional as F
from basicsr.data.transforms import mod_crop
from basicsr.utils import img2tensor, scandir
The provided code snippet includes necessary dependencies for implementing the `paired_paths_from_folder` function. Write a Python function `def paired_paths_from_folder(folders, keys, filename_tmpl)` to solve the following problem:
Generate paired paths from folders. Args: folders (list[str]): A list of folder paths. The order of the list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be consistent with folders, e.g., ['lq', 'gt']. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list.
Here is the function:
def paired_paths_from_folder(folders, keys, filename_tmpl):
"""Generate paired paths from folders.
Args:
    folders (list[str]): A list of folder paths. The order of the list
        should be [input_folder, gt_folder].
    keys (list[str]): A list of keys identifying folders. The order
        should be consistent with folders, e.g., ['lq', 'gt'].
filename_tmpl (str): Template for each filename. Note that the
template excludes the file extension. Usually the filename_tmpl is
for files in the input folder.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
'The len of folders should be 2 with [input_folder, gt_folder]. '
f'But got {len(folders)}')
assert len(keys) == 2, (
'The len of keys should be 2 with [input_key, gt_key]. '
f'But got {len(keys)}')
input_folder, gt_folder = folders
input_key, gt_key = keys
input_paths = list(scandir(input_folder))
gt_paths = list(scandir(gt_folder))
assert len(input_paths) == len(gt_paths), (
f'{input_key} and {gt_key} datasets have different number of images: '
f'{len(input_paths)}, {len(gt_paths)}.')
paths = []
for idx in range(len(gt_paths)):
gt_path = gt_paths[idx]
basename, ext = osp.splitext(osp.basename(gt_path))
input_path = input_paths[idx]
basename_input, ext_input = osp.splitext(osp.basename(input_path))
input_name = f'{filename_tmpl.format(basename)}{ext_input}'
input_path = osp.join(input_folder, input_name)
assert input_name in input_paths, (f'{input_name} is not in '
f'{input_key}_paths.')
gt_path = osp.join(gt_folder, gt_path)
paths.append(
dict([(f'{input_key}_path', input_path),
(f'{gt_key}_path', gt_path)]))
return paths | Generate paired paths from folders. Args: folders (list[str]): A list of folder paths. The order of the list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be consistent with folders, e.g., ['lq', 'gt']. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list.
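A matching sketch for `paired_paths_from_folder`; the template '{}_x4' illustrates input files that carry a suffix (paths are hypothetical):
paths = paired_paths_from_folder(
    ['datasets/demo/lq', 'datasets/demo/gt'], ['lq', 'gt'],
    filename_tmpl='{}_x4')
# pairs gt/baboon.png with lq/baboon_x4.png, for example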
3,896 | import cv2
import numpy as np
import torch
from os import path as osp
from torch.nn import functional as F
from basicsr.data.transforms import mod_crop
from basicsr.utils import img2tensor, scandir
def generate_gaussian_kernel(kernel_size=13, sigma=1.6):
"""Generate Gaussian kernel used in `duf_downsample`.
Args:
kernel_size (int): Kernel size. Default: 13.
sigma (float): Sigma of the Gaussian kernel. Default: 1.6.
Returns:
np.array: The Gaussian kernel.
"""
    from scipy.ndimage import gaussian_filter
    kernel = np.zeros((kernel_size, kernel_size))
    # set element at the middle to one, a dirac delta
    kernel[kernel_size // 2, kernel_size // 2] = 1
    # gaussian-smooth the dirac, resulting in a gaussian filter
    return gaussian_filter(kernel, sigma)
The provided code snippet includes necessary dependencies for implementing the `duf_downsample` function. Write a Python function `def duf_downsample(x, kernel_size=13, scale=4)` to solve the following problem:
Downsampling with Gaussian kernel used in the DUF official code. Args: x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w). kernel_size (int): Kernel size. Default: 13. scale (int): Downsampling factor. Supported scale: (2, 3, 4). Default: 4. Returns: Tensor: DUF downsampled frames.
Here is the function:
def duf_downsample(x, kernel_size=13, scale=4):
"""Downsamping with Gaussian kernel used in the DUF official code.
Args:
x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w).
kernel_size (int): Kernel size. Default: 13.
scale (int): Downsampling factor. Supported scale: (2, 3, 4).
Default: 4.
Returns:
Tensor: DUF downsampled frames.
"""
assert scale in (2, 3,
4), f'Only support scale (2, 3, 4), but got {scale}.'
squeeze_flag = False
if x.ndim == 4:
squeeze_flag = True
x = x.unsqueeze(0)
b, t, c, h, w = x.size()
x = x.view(-1, 1, h, w)
pad_w, pad_h = kernel_size // 2 + scale * 2, kernel_size // 2 + scale * 2
x = F.pad(x, (pad_w, pad_w, pad_h, pad_h), 'reflect')
gaussian_filter = generate_gaussian_kernel(kernel_size, 0.4 * scale)
gaussian_filter = torch.from_numpy(gaussian_filter).type_as(x).unsqueeze(
0).unsqueeze(0)
x = F.conv2d(x, gaussian_filter, stride=scale)
x = x[:, :, 2:-2, 2:-2]
x = x.view(b, t, c, x.size(2), x.size(3))
if squeeze_flag:
x = x.squeeze(0)
return x | Downsampling with Gaussian kernel used in the DUF official code. Args: x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w). kernel_size (int): Kernel size. Default: 13. scale (int): Downsampling factor. Supported scale: (2, 3, 4). Default: 4. Returns: Tensor: DUF downsampled frames.
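A quick shape check for `duf_downsample` (values and sizes are illustrative):
import torch
frames = torch.rand(2, 5, 3, 64, 64)  # (b, t, c, h, w), values in [0, 1]
lq = duf_downsample(frames, kernel_size=13, scale=4)
print(lq.shape)  # torch.Size([2, 5, 3, 16, 16])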
3,897 | import functools
from torch.nn import functional as F
# `reduce_loss` is the companion helper from basicsr.models.losses.loss_util;
# it is reproduced here so that `weight_reduce_loss` below is self-contained.
def reduce_loss(loss, reduction):
    """Reduce loss as specified.
    Args:
        loss (Tensor): Element-wise loss tensor.
        reduction (str): Options are 'none', 'mean' and 'sum'.
    Returns:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, elementwise_mean: 1, sum: 2
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    else:
        return loss.sum()
def weight_reduce_loss(loss, weight=None, reduction='mean'):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights. Default: None.
reduction (str): Same as built-in losses of PyTorch. Options are
'none', 'mean' and 'sum'. Default: 'mean'.
Returns:
Tensor: Loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
assert weight.dim() == loss.dim()
assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
loss = loss * weight
# if weight is not specified or reduction is sum, just reduce the loss
if weight is None or reduction == 'sum':
loss = reduce_loss(loss, reduction)
# if reduction is mean, then compute mean over weight region
elif reduction == 'mean':
if weight.size(1) > 1:
weight = weight.sum()
else:
weight = weight.sum() * loss.size(1)
loss = loss.sum() / weight
return loss
The provided code snippet includes necessary dependencies for implementing the `weighted_loss` function. Write a Python function `def weighted_loss(loss_func)` to solve the following problem:
Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.5000) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, reduction='sum') tensor(3.)
Here is the function:
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
**kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.5000)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, reduction='sum')
tensor(3.)
"""
@functools.wraps(loss_func)
def wrapper(pred, target, weight=None, reduction='mean', **kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction)
return loss
return wrapper | Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.5000) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, reduction='sum') tensor(3.) |
3,898 | import torch
from torch import nn as nn
from torch.nn import functional as F
import numpy as np
from basicsr.models.losses.loss_util import weighted_loss
@weighted_loss  # restores the decorator so the weight/reduction arguments work
def l1_loss(pred, target):
return F.l1_loss(pred, target, reduction='none') | null |
3,899 | import torch
from torch import nn as nn
from torch.nn import functional as F
import numpy as np
from basicsr.models.losses.loss_util import weighted_loss
@weighted_loss  # restores the decorator so the weight/reduction arguments work
def mse_loss(pred, target):
return F.mse_loss(pred, target, reduction='none') | null |
3,900 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class AvgPool2d(nn.Module):
def __init__(self, kernel_size=None, base_size=None, auto_pad=True, fast_imp=False, train_size=None):
super().__init__()
self.kernel_size = kernel_size
self.base_size = base_size
self.auto_pad = auto_pad
# only used for fast implementation
self.fast_imp = fast_imp
self.rs = [5, 4, 3, 2, 1]
self.max_r1 = self.rs[0]
self.max_r2 = self.rs[0]
self.train_size = train_size
def extra_repr(self) -> str:
return 'kernel_size={}, base_size={}, stride={}, fast_imp={}'.format(
self.kernel_size, self.base_size, self.kernel_size, self.fast_imp
)
def forward(self, x):
if self.kernel_size is None and self.base_size:
train_size = self.train_size
if isinstance(self.base_size, int):
self.base_size = (self.base_size, self.base_size)
self.kernel_size = list(self.base_size)
self.kernel_size[0] = x.shape[2] * self.base_size[0] // train_size[-2]
self.kernel_size[1] = x.shape[3] * self.base_size[1] // train_size[-1]
# only used for fast implementation
self.max_r1 = max(1, self.rs[0] * x.shape[2] // train_size[-2])
self.max_r2 = max(1, self.rs[0] * x.shape[3] // train_size[-1])
if self.kernel_size[0] >= x.size(-2) and self.kernel_size[1] >= x.size(-1):
return F.adaptive_avg_pool2d(x, 1)
if self.fast_imp: # Non-equivalent implementation but faster
h, w = x.shape[2:]
if self.kernel_size[0] >= h and self.kernel_size[1] >= w:
out = F.adaptive_avg_pool2d(x, 1)
else:
r1 = [r for r in self.rs if h % r == 0][0]
r2 = [r for r in self.rs if w % r == 0][0]
# reduction_constraint
r1 = min(self.max_r1, r1)
r2 = min(self.max_r2, r2)
s = x[:, :, ::r1, ::r2].cumsum(dim=-1).cumsum(dim=-2)
n, c, h, w = s.shape
k1, k2 = min(h - 1, self.kernel_size[0] // r1), min(w - 1, self.kernel_size[1] // r2)
out = (s[:, :, :-k1, :-k2] - s[:, :, :-k1, k2:] - s[:, :, k1:, :-k2] + s[:, :, k1:, k2:]) / (k1 * k2)
out = torch.nn.functional.interpolate(out, scale_factor=(r1, r2))
else:
n, c, h, w = x.shape
s = x.cumsum(dim=-1).cumsum_(dim=-2)
s = torch.nn.functional.pad(s, (1, 0, 1, 0)) # pad 0 for convenience
k1, k2 = min(h, self.kernel_size[0]), min(w, self.kernel_size[1])
s1, s2, s3, s4 = s[:, :, :-k1, :-k2], s[:, :, :-k1, k2:], s[:, :, k1:, :-k2], s[:, :, k1:, k2:]
out = s4 + s1 - s2 - s3
out = out / (k1 * k2)
if self.auto_pad:
n, c, h, w = x.shape
_h, _w = out.shape[2:]
# print(x.shape, self.kernel_size)
pad2d = ((w - _w) // 2, (w - _w + 1) // 2, (h - _h) // 2, (h - _h + 1) // 2)
out = torch.nn.functional.pad(out, pad2d, mode='replicate')
return out
def replace_layers(model, base_size, train_size, fast_imp, **kwargs):
for n, m in model.named_children():
if len(list(m.children())) > 0:
## compound module, go inside it
replace_layers(m, base_size, train_size, fast_imp, **kwargs)
if isinstance(m, nn.AdaptiveAvgPool2d):
pool = AvgPool2d(base_size=base_size, fast_imp=fast_imp, train_size=train_size)
assert m.output_size == 1
setattr(model, n, pool) | null |
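A hedged usage sketch of `replace_layers`: it swaps every `nn.AdaptiveAvgPool2d(1)` in a model for the train-size-aware `AvgPool2d` above, so test-time statistics are computed over windows matching the training resolution. The network below is a stand-in, not from this repo:
import torch
import torch.nn as nn
model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1),
                      nn.AdaptiveAvgPool2d(1))  # stand-in network
train_size = (1, 3, 256, 256)  # (n, c, h, w) seen at training time
replace_layers(model, base_size=(384, 384), train_size=train_size,
               fast_imp=False)
with torch.no_grad():
    out = model(torch.rand(1, 3, 512, 512))  # larger test-time input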
3,901 | import math
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
from basicsr.utils import get_root_logger
import time
The provided code snippet includes necessary dependencies for implementing the `default_init_weights` function. Write a Python function `def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs)` to solve the following problem:
Initialize network weights. Args: module_list (list[nn.Module] | nn.Module): Modules to be initialized. scale (float): Scale initialized weights, especially for residual blocks. Default: 1. bias_fill (float): The value to fill bias. Default: 0 kwargs (dict): Other arguments for initialization function.
Here is the function:
def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
"""Initialize network weights.
Args:
module_list (list[nn.Module] | nn.Module): Modules to be initialized.
scale (float): Scale initialized weights, especially for residual
blocks. Default: 1.
bias_fill (float): The value to fill bias. Default: 0
kwargs (dict): Other arguments for initialization function.
"""
if not isinstance(module_list, list):
module_list = [module_list]
for module in module_list:
for m in module.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, _BatchNorm):
init.constant_(m.weight, 1)
if m.bias is not None:
m.bias.data.fill_(bias_fill) | Initialize network weights. Args: module_list (list[nn.Module] | nn.Module): Modules to be initialized. scale (float): Scale initialized weights, especially for residual blocks. Default: 1. bias_fill (float): The value to fill bias. Default: 0 kwargs (dict): Other arguments for initialization function. |
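A short sketch of `default_init_weights`; extra keyword arguments are forwarded to `init.kaiming_normal_`:
import torch.nn as nn
block = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
                      nn.Conv2d(16, 3, 3, padding=1))
# scale=0.1 damps the initial weights, as commonly done for residual blocks
default_init_weights(block, scale=0.1, bias_fill=0, mode='fan_in')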
3,902 | import math
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
from basicsr.utils import get_root_logger
import time
The provided code snippet includes necessary dependencies for implementing the `make_layer` function. Write a Python function `def make_layer(basic_block, num_basic_block, **kwarg)` to solve the following problem:
Make layers by stacking the same blocks. Args: basic_block (nn.module): nn.module class for basic block. num_basic_block (int): number of blocks. Returns: nn.Sequential: Stacked blocks in nn.Sequential.
Here is the function:
def make_layer(basic_block, num_basic_block, **kwarg):
"""Make layers by stacking the same blocks.
Args:
basic_block (nn.module): nn.module class for basic block.
num_basic_block (int): number of blocks.
Returns:
nn.Sequential: Stacked blocks in nn.Sequential.
"""
layers = []
for _ in range(num_basic_block):
layers.append(basic_block(**kwarg))
return nn.Sequential(*layers) | Make layers by stacking the same blocks. Args: basic_block (nn.module): nn.module class for basic block. num_basic_block (int): number of blocks. Returns: nn.Sequential: Stacked blocks in nn.Sequential. |
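For example, stacking five copies of a hypothetical `ResidualBlock` (the block class below is illustrative, not from this repo):
import torch.nn as nn
class ResidualBlock(nn.Module):
    def __init__(self, num_feat=64):
        super().__init__()
        self.conv = nn.Conv2d(num_feat, num_feat, 3, padding=1)
    def forward(self, x):
        return x + self.conv(x)
body = make_layer(ResidualBlock, 5, num_feat=64)  # nn.Sequential of 5 blocks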
3,903 | import math
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
from basicsr.utils import get_root_logger
import time
The provided code snippet includes necessary dependencies for implementing the `flow_warp` function. Write a Python function `def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros', align_corners=True)` to solve the following problem:
Warp an image or feature map with optical flow. Args: x (Tensor): Tensor with size (n, c, h, w). flow (Tensor): Tensor with size (n, h, w, 2), normal value. interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'. padding_mode (str): 'zeros' or 'border' or 'reflection'. Default: 'zeros'. align_corners (bool): Before pytorch 1.3, the default value is align_corners=True. After pytorch 1.3, the default value is align_corners=False. Here, we use True as the default. Returns: Tensor: Warped image or feature map.
Here is the function:
def flow_warp(x,
flow,
interp_mode='bilinear',
padding_mode='zeros',
align_corners=True):
"""Warp an image or feature map with optical flow.
Args:
x (Tensor): Tensor with size (n, c, h, w).
flow (Tensor): Tensor with size (n, h, w, 2), normal value.
interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'.
padding_mode (str): 'zeros' or 'border' or 'reflection'.
Default: 'zeros'.
    align_corners (bool): Before pytorch 1.3, the default value is
        align_corners=True. After pytorch 1.3, the default value is
        align_corners=False. Here, we use True as the default.
Returns:
Tensor: Warped image or feature map.
"""
assert x.size()[-2:] == flow.size()[1:3]
_, _, h, w = x.size()
# create mesh grid
grid_y, grid_x = torch.meshgrid(
torch.arange(0, h).type_as(x),
torch.arange(0, w).type_as(x))
grid = torch.stack((grid_x, grid_y), 2).float() # W(x), H(y), 2
grid.requires_grad = False
vgrid = grid + flow
# scale grid to [-1,1]
vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(w - 1, 1) - 1.0
vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(h - 1, 1) - 1.0
vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
output = F.grid_sample(
x,
vgrid_scaled,
mode=interp_mode,
padding_mode=padding_mode,
align_corners=align_corners)
# TODO, what if align_corners=False
return output | Warp an image or feature map with optical flow. Args: x (Tensor): Tensor with size (n, c, h, w). flow (Tensor): Tensor with size (n, h, w, 2), normal value. interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'. padding_mode (str): 'zeros' or 'border' or 'reflection'. Default: 'zeros'. align_corners (bool): Before pytorch 1.3, the default value is align_corners=True. After pytorch 1.3, the default value is align_corners=False. Here, we use True as the default. Returns: Tensor: Warped image or feature map.
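A quick sanity check for `flow_warp`: a zero flow should return the input up to interpolation error:
import torch
x = torch.rand(1, 3, 32, 32)
flow = torch.zeros(1, 32, 32, 2)  # (n, h, w, 2), zero displacement
warped = flow_warp(x, flow)
print(torch.allclose(warped, x, atol=1e-5))  # True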
3,904 | import math
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
from basicsr.utils import get_root_logger
import time
The provided code snippet includes necessary dependencies for implementing the `resize_flow` function. Write a Python function `def resize_flow(flow, size_type, sizes, interp_mode='bilinear', align_corners=False)` to solve the following problem:
Resize a flow according to ratio or shape. Args: flow (Tensor): Precomputed flow. shape [N, 2, H, W]. size_type (str): 'ratio' or 'shape'. sizes (list[int | float]): the ratio for resizing or the final output shape. 1) The order of ratio should be [ratio_h, ratio_w]. For downsampling, the ratio should be smaller than 1.0 (i.e., ratio < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e., ratio > 1.0). 2) The order of output_size should be [out_h, out_w]. interp_mode (str): The mode of interpolation for resizing. Default: 'bilinear'. align_corners (bool): Whether align corners. Default: False. Returns: Tensor: Resized flow.
Here is the function:
def resize_flow(flow,
size_type,
sizes,
interp_mode='bilinear',
align_corners=False):
"""Resize a flow according to ratio or shape.
Args:
flow (Tensor): Precomputed flow. shape [N, 2, H, W].
size_type (str): 'ratio' or 'shape'.
sizes (list[int | float]): the ratio for resizing or the final output
shape.
1) The order of ratio should be [ratio_h, ratio_w]. For
downsampling, the ratio should be smaller than 1.0 (i.e., ratio
< 1.0). For upsampling, the ratio should be larger than 1.0 (i.e.,
ratio > 1.0).
2) The order of output_size should be [out_h, out_w].
interp_mode (str): The mode of interpolation for resizing.
Default: 'bilinear'.
align_corners (bool): Whether align corners. Default: False.
Returns:
Tensor: Resized flow.
"""
_, _, flow_h, flow_w = flow.size()
if size_type == 'ratio':
output_h, output_w = int(flow_h * sizes[0]), int(flow_w * sizes[1])
elif size_type == 'shape':
output_h, output_w = sizes[0], sizes[1]
else:
raise ValueError(
f'Size type should be ratio or shape, but got type {size_type}.')
input_flow = flow.clone()
ratio_h = output_h / flow_h
ratio_w = output_w / flow_w
input_flow[:, 0, :, :] *= ratio_w
input_flow[:, 1, :, :] *= ratio_h
resized_flow = F.interpolate(
input=input_flow,
size=(output_h, output_w),
mode=interp_mode,
align_corners=align_corners)
return resized_flow | Resize a flow according to ratio or shape. Args: flow (Tensor): Precomputed flow. shape [N, 2, H, W]. size_type (str): 'ratio' or 'shape'. sizes (list[int | float]): the ratio for resizing or the final output shape. 1) The order of ratio should be [ratio_h, ratio_w]. For downsampling, the ratio should be smaller than 1.0 (i.e., ratio < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e., ratio > 1.0). 2) The order of output_size should be [out_h, out_w]. interp_mode (str): The mode of interpolation for resizing. Default: 'bilinear'. align_corners (bool): Whether align corners. Default: False. Returns: Tensor: Resized flow. |
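For instance, halving a flow field with size_type='ratio' also rescales the flow values by the same ratio:
import torch
flow = torch.ones(1, 2, 64, 64)  # [N, 2, H, W]
small = resize_flow(flow, 'ratio', [0.5, 0.5])
print(small.shape)          # torch.Size([1, 2, 32, 32])
print(small.mean().item())  # ~0.5, since values are rescaled by the ratio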
3,905 | import math
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
from basicsr.utils import get_root_logger
import time
The provided code snippet includes necessary dependencies for implementing the `pixel_unshuffle` function. Write a Python function `def pixel_unshuffle(x, scale)` to solve the following problem:
Pixel unshuffle. Args: x (Tensor): Input feature with shape (b, c, hh, hw). scale (int): Downsample ratio. Returns: Tensor: the pixel unshuffled feature.
Here is the function:
def pixel_unshuffle(x, scale):
""" Pixel unshuffle.
Args:
x (Tensor): Input feature with shape (b, c, hh, hw).
scale (int): Downsample ratio.
Returns:
Tensor: the pixel unshuffled feature.
"""
b, c, hh, hw = x.size()
out_channel = c * (scale**2)
assert hh % scale == 0 and hw % scale == 0
h = hh // scale
w = hw // scale
x_view = x.view(b, c, h, scale, w, scale)
return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w) | Pixel unshuffle. Args: x (Tensor): Input feature with shape (b, c, hh, hw). scale (int): Downsample ratio. Returns: Tensor: the pixel unshuffled feature. |
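A shape check for `pixel_unshuffle`; the comparison with the built-in is an assumption that holds for torch >= 1.8, where `F.pixel_unshuffle` uses the same channel ordering:
import torch
from torch.nn import functional as F
x = torch.rand(1, 3, 8, 8)
out = pixel_unshuffle(x, scale=2)
print(out.shape)  # torch.Size([1, 12, 4, 4])
print(torch.equal(out, F.pixel_unshuffle(x, 2)))  # expected True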
3,906 | import math
import torch
from torch import nn as nn
from torch.nn import functional as F
from torch.nn import init as init
from torch.nn.modules.batchnorm import _BatchNorm
from basicsr.utils import get_root_logger
import time
def measure_inference_speed(model, data, max_iter=200, log_interval=50):
model.eval()
# the first several iterations may be very slow so skip them
num_warmup = 5
pure_inf_time = 0
fps = 0
    # benchmark for up to max_iter iterations (after warmup) and average
for i in range(max_iter):
torch.cuda.synchronize()
start_time = time.perf_counter()
with torch.no_grad():
model(*data)
torch.cuda.synchronize()
elapsed = time.perf_counter() - start_time
if i >= num_warmup:
pure_inf_time += elapsed
if (i + 1) % log_interval == 0:
fps = (i + 1 - num_warmup) / pure_inf_time
print(
f'Done image [{i + 1:<3}/ {max_iter}], '
f'fps: {fps:.1f} img / s, '
f'times per image: {1000 / fps:.1f} ms / img',
flush=True)
if (i + 1) == max_iter:
fps = (i + 1 - num_warmup) / pure_inf_time
print(
f'Overall fps: {fps:.1f} img / s, '
f'times per image: {1000 / fps:.1f} ms / img',
flush=True)
break
return fps | null |
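A hedged usage sketch of `measure_inference_speed`; it requires a CUDA device because the loop calls `torch.cuda.synchronize()`, and the model below is a stand-in:
import torch
import torch.nn as nn
model = nn.Conv2d(3, 3, 3, padding=1).cuda()  # stand-in network
data = (torch.rand(1, 3, 256, 256).cuda(),)   # unpacked as model(*data)
fps = measure_inference_speed(model, data, max_iter=100, log_interval=50)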
3,909 | import math
import requests
from tqdm import tqdm
from .misc import sizeof_fmt
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response,
destination,
file_size=None,
chunk_size=32768):
if file_size is not None:
pbar = tqdm(total=math.ceil(file_size / chunk_size), unit='chunk')
readable_file_size = sizeof_fmt(file_size)
else:
pbar = None
with open(destination, 'wb') as f:
downloaded_size = 0
for chunk in response.iter_content(chunk_size):
downloaded_size += chunk_size
if pbar is not None:
pbar.update(1)
pbar.set_description(f'Download {sizeof_fmt(downloaded_size)} '
f'/ {readable_file_size}')
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if pbar is not None:
pbar.close()
The provided code snippet includes necessary dependencies for implementing the `download_file_from_google_drive` function. Write a Python function `def download_file_from_google_drive(file_id, save_path)` to solve the following problem:
Download files from google drive. Ref: https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive # noqa E501 Args: file_id (str): File id. save_path (str): Save path.
Here is the function:
def download_file_from_google_drive(file_id, save_path):
"""Download files from google drive.
Ref:
https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive # noqa E501
Args:
file_id (str): File id.
save_path (str): Save path.
"""
session = requests.Session()
URL = 'https://docs.google.com/uc?export=download'
params = {'id': file_id}
response = session.get(URL, params=params, stream=True)
token = get_confirm_token(response)
if token:
params['confirm'] = token
response = session.get(URL, params=params, stream=True)
# get file size
response_file_size = session.get(
URL, params=params, stream=True, headers={'Range': 'bytes=0-2'})
if 'Content-Range' in response_file_size.headers:
file_size = int(
response_file_size.headers['Content-Range'].split('/')[1])
else:
file_size = None
save_response_content(response, save_path, file_size) | Download files from google drive. Ref: https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive # noqa E501 Args: file_id (str): File id. save_path (str): Save path. |
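Usage is a single call; the file id and destination below are placeholders, not real values:
download_file_from_google_drive('1AbCdEfG-placeholder-id',
                                'experiments/pretrained.pth')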
3,910 | import math
import numpy as np
import torch
# `cubic` is the bicubic kernel (a = -0.5) from basicsr's matlab_functions;
# it is reproduced here because `calculate_weights_indices` calls it below.
def cubic(x):
    """Cubic kernel used for the MATLAB-style bicubic interpolation."""
    absx = torch.abs(x)
    absx2 = absx**2
    absx3 = absx**3
    return (1.5 * absx3 - 2.5 * absx2 + 1) * (
        (absx <= 1).type_as(absx)) + (-0.5 * absx3 + 2.5 * absx2 - 4 * absx +
                                      2) * (((absx > 1) *
                                             (absx <= 2)).type_as(absx))
def calculate_weights_indices(in_length, out_length, scale, kernel,
                              kernel_width, antialiasing):
"""Calculate weights and indices, used for imresize function.
Args:
in_length (int): Input length.
out_length (int): Output length.
scale (float): Scale factor.
kernel_width (int): Kernel width.
antialiasing (bool): Whether to apply anti-aliasing when downsampling.
"""
if (scale < 1) and antialiasing:
# Use a modified kernel (larger kernel width) to simultaneously
# interpolate and antialias
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5 + scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
p = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, p) + torch.linspace(
0, p - 1, p).view(1, p).expand(out_length, p)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, p) - indices
# apply cubic kernel
if (scale < 1) and antialiasing:
weights = scale * cubic(distance_to_center * scale)
else:
weights = cubic(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, p)
# If a column in weights is all zero, get rid of it. only consider the
# first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, p - 2)
weights = weights.narrow(1, 1, p - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, p - 2)
weights = weights.narrow(1, 0, p - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
The provided code snippet includes necessary dependencies for implementing the `imresize` function. Write a Python function `def imresize(img, scale, antialiasing=True)` to solve the following problem:
imresize function same as MATLAB. It now only supports bicubic. The same scale applies for both height and width. Args: img (Tensor | Numpy array): Tensor: Input image with shape (c, h, w), [0, 1] range. Numpy: Input image with shape (h, w, c), [0, 1] range. scale (float): Scale factor. The same scale applies for both height and width. antialiasing (bool): Whether to apply anti-aliasing when downsampling. Default: True. Returns: Tensor: Output image with shape (c, h, w), [0, 1] range, w/o round.
Here is the function:
def imresize(img, scale, antialiasing=True):
"""imresize function same as MATLAB.
It now only supports bicubic.
The same scale applies for both height and width.
Args:
img (Tensor | Numpy array):
Tensor: Input image with shape (c, h, w), [0, 1] range.
Numpy: Input image with shape (h, w, c), [0, 1] range.
scale (float): Scale factor. The same scale applies for both height
and width.
antialiasing (bool): Whether to apply anti-aliasing when downsampling.
Default: True.
Returns:
Tensor: Output image with shape (c, h, w), [0, 1] range, w/o round.
"""
if type(img).__module__ == np.__name__: # numpy type
numpy_type = True
img = torch.from_numpy(img.transpose(2, 0, 1)).float()
else:
numpy_type = False
in_c, in_h, in_w = img.size()
out_h, out_w = math.ceil(in_h * scale), math.ceil(in_w * scale)
kernel_width = 4
kernel = 'cubic'
# get weights and indices
weights_h, indices_h, sym_len_hs, sym_len_he = calculate_weights_indices(
in_h, out_h, scale, kernel, kernel_width, antialiasing)
weights_w, indices_w, sym_len_ws, sym_len_we = calculate_weights_indices(
in_w, out_w, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_c, in_h + sym_len_hs + sym_len_he, in_w)
img_aug.narrow(1, sym_len_hs, in_h).copy_(img)
sym_patch = img[:, :sym_len_hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_he:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_hs + in_h, sym_len_he).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_c, out_h, in_w)
kernel_width = weights_h.size(1)
for i in range(out_h):
idx = int(indices_h[i][0])
for j in range(in_c):
out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(
0, 1).mv(weights_h[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_c, out_h, in_w + sym_len_ws + sym_len_we)
out_1_aug.narrow(2, sym_len_ws, in_w).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_we:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_ws + in_w, sym_len_we).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_c, out_h, out_w)
kernel_width = weights_w.size(1)
for i in range(out_w):
idx = int(indices_w[i][0])
for j in range(in_c):
out_2[j, :, i] = out_1_aug[j, :,
idx:idx + kernel_width].mv(weights_w[i])
if numpy_type:
out_2 = out_2.numpy().transpose(1, 2, 0)
return out_2 | imresize function same as MATLAB. It now only supports bicubic. The same scale applies for both height and width. Args: img (Tensor | Numpy array): Tensor: Input image with shape (c, h, w), [0, 1] range. Numpy: Input image with shape (h, w, c), [0, 1] range. scale (float): Scale factor. The same scale applies for both height and width. antialiasing (bool): Whether to apply anti-aliasing when downsampling. Default: True. Returns: Tensor: Output image with shape (c, h, w), [0, 1] range, w/o round.
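A shape-level sketch of `imresize` for both supported input types:
import numpy as np
import torch
img_np = np.random.rand(64, 48, 3).astype(np.float32)  # (h, w, c) in [0, 1]
out_np = imresize(img_np, 0.5)  # numpy array of shape (32, 24, 3)
img_t = torch.rand(3, 64, 48)   # (c, h, w) in [0, 1]
out_t = imresize(img_t, 2)      # tensor of shape (3, 128, 96)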
3,911 | import math
import numpy as np
import torch
def _convert_input_type_range(img):
"""Convert the type and range of the input image.
It converts the input image to np.float32 type and range of [0, 1].
It is mainly used for pre-processing the input image in color space
conversion functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
(ndarray): The converted image with type of np.float32 and range of
[0, 1].
"""
img_type = img.dtype
img = img.astype(np.float32)
if img_type == np.float32:
pass
elif img_type == np.uint8:
img /= 255.
else:
raise TypeError('The img type should be np.float32 or np.uint8, '
f'but got {img_type}')
return img
def _convert_output_type_range(img, dst_type):
"""Convert the type and range of the image according to dst_type.
It converts the image to desired type and range. If `dst_type` is np.uint8,
images will be converted to np.uint8 type with range [0, 255]. If
`dst_type` is np.float32, it converts the image to np.float32 type with
range [0, 1].
It is mainly used for post-processing images in color space conversion
functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The image to be converted with np.float32 type and
range [0, 255].
dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
converts the image to np.uint8 type with range [0, 255]. If
dst_type is np.float32, it converts the image to np.float32 type
with range [0, 1].
Returns:
(ndarray): The converted image with desired type and range.
"""
if dst_type not in (np.uint8, np.float32):
raise TypeError('The dst_type should be np.float32 or np.uint8, '
f'but got {dst_type}')
if dst_type == np.uint8:
img = img.round()
else:
img /= 255.
return img.astype(dst_type)
The provided code snippet includes necessary dependencies for implementing the `rgb2ycbcr` function. Write a Python function `def rgb2ycbcr(img, y_only=False)` to solve the following problem:
Convert a RGB image to YCbCr image. This function produces the same results as Matlab's `rgb2ycbcr` function. It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`. In OpenCV, it implements a JPEG conversion. See more details in https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. Args: img (ndarray): The input image. It accepts: 1. np.uint8 type with range [0, 255]; 2. np.float32 type with range [0, 1]. y_only (bool): Whether to only return Y channel. Default: False. Returns: ndarray: The converted YCbCr image. The output image has the same type and range as input image.
Here is the function:
def rgb2ycbcr(img, y_only=False):
"""Convert a RGB image to YCbCr image.
This function produces the same results as Matlab's `rgb2ycbcr` function.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
y_only (bool): Whether to only return Y channel. Default: False.
Returns:
ndarray: The converted YCbCr image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img)
if y_only:
out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0
else:
out_img = np.matmul(
img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) + [16, 128, 128]
out_img = _convert_output_type_range(out_img, img_type)
return out_img | Convert a RGB image to YCbCr image. This function produces the same results as Matlab's `rgb2ycbcr` function. It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`. In OpenCV, it implements a JPEG conversion. See more details in https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. Args: img (ndarray): The input image. It accepts: 1. np.uint8 type with range [0, 255]; 2. np.float32 type with range [0, 1]. y_only (bool): Whether to only return Y channel. Default: False. Returns: ndarray: The converted YCbCr image. The output image has the same type and range as input image. |
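A small sketch of `rgb2ycbcr`, e.g. extracting the Y channel that PSNR/SSIM are often computed on:
import numpy as np
img = (np.random.rand(16, 16, 3) * 255).astype(np.uint8)  # RGB, uint8
y = rgb2ycbcr(img, y_only=True)  # uint8 Y channel, range [16, 235]
ycbcr = rgb2ycbcr(img)           # 3-channel YCbCr, same dtype as input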
3,912 | import math
import numpy as np
import torch
def _convert_input_type_range(img):
"""Convert the type and range of the input image.
It converts the input image to np.float32 type and range of [0, 1].
It is mainly used for pre-processing the input image in color space
conversion functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
(ndarray): The converted image with type of np.float32 and range of
[0, 1].
"""
img_type = img.dtype
img = img.astype(np.float32)
if img_type == np.float32:
pass
elif img_type == np.uint8:
img /= 255.
else:
raise TypeError('The img type should be np.float32 or np.uint8, '
f'but got {img_type}')
return img
def _convert_output_type_range(img, dst_type):
"""Convert the type and range of the image according to dst_type.
It converts the image to desired type and range. If `dst_type` is np.uint8,
images will be converted to np.uint8 type with range [0, 255]. If
`dst_type` is np.float32, it converts the image to np.float32 type with
range [0, 1].
It is mainly used for post-processing images in color space conversion
functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The image to be converted with np.float32 type and
range [0, 255].
dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
converts the image to np.uint8 type with range [0, 255]. If
dst_type is np.float32, it converts the image to np.float32 type
with range [0, 1].
Returns:
(ndarray): The converted image with desired type and range.
"""
if dst_type not in (np.uint8, np.float32):
raise TypeError('The dst_type should be np.float32 or np.uint8, '
f'but got {dst_type}')
if dst_type == np.uint8:
img = img.round()
else:
img /= 255.
return img.astype(dst_type)
The provided code snippet includes necessary dependencies for implementing the `ycbcr2rgb` function. Write a Python function `def ycbcr2rgb(img)` to solve the following problem:
Convert a YCbCr image to RGB image. This function produces the same results as Matlab's ycbcr2rgb function. It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`. In OpenCV, it implements a JPEG conversion. See more details in https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. Args: img (ndarray): The input image. It accepts: 1. np.uint8 type with range [0, 255]; 2. np.float32 type with range [0, 1]. Returns: ndarray: The converted RGB image. The output image has the same type and range as input image.
Here is the function:
def ycbcr2rgb(img):
"""Convert a YCbCr image to RGB image.
This function produces the same results as Matlab's ycbcr2rgb function.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
ndarray: The converted RGB image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img) * 255
out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621],
[0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [
-222.921, 135.576, -276.836
] # noqa: E126
out_img = _convert_output_type_range(out_img, img_type)
return out_img | Convert a YCbCr image to RGB image. This function produces the same results as Matlab's ycbcr2rgb function. It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`. In OpenCV, it implements a JPEG conversion. See more details in https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. Args: img (ndarray): The input image. It accepts: 1. np.uint8 type with range [0, 255]; 2. np.float32 type with range [0, 1]. Returns: ndarray: The converted RGB image. The output image has the same type and range as input image. |
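A tiny sketch of `ycbcr2rgb`: a flat mid-range YCbCr image maps to a near-uniform gray:
import numpy as np
ycbcr = np.full((8, 8, 3), 128, dtype=np.uint8)  # Y = Cb = Cr = 128
rgb = ycbcr2rgb(ycbcr)  # roughly uniform gray, same dtype/range as input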
3,913 | import math
import numpy as np
import torch
def _convert_input_type_range(img):
"""Convert the type and range of the input image.
It converts the input image to np.float32 type and range of [0, 1].
It is mainly used for pre-processing the input image in color space
conversion functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
(ndarray): The converted image with type of np.float32 and range of
[0, 1].
"""
img_type = img.dtype
img = img.astype(np.float32)
if img_type == np.float32:
pass
elif img_type == np.uint8:
img /= 255.
else:
raise TypeError('The img type should be np.float32 or np.uint8, '
f'but got {img_type}')
return img
def _convert_output_type_range(img, dst_type):
"""Convert the type and range of the image according to dst_type.
It converts the image to desired type and range. If `dst_type` is np.uint8,
images will be converted to np.uint8 type with range [0, 255]. If
`dst_type` is np.float32, it converts the image to np.float32 type with
range [0, 1].
It is mainly used for post-processing images in color space conversion
functions such as rgb2ycbcr and ycbcr2rgb.
Args:
img (ndarray): The image to be converted with np.float32 type and
range [0, 255].
dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
converts the image to np.uint8 type with range [0, 255]. If
dst_type is np.float32, it converts the image to np.float32 type
with range [0, 1].
Returns:
(ndarray): The converted image with desired type and range.
"""
if dst_type not in (np.uint8, np.float32):
raise TypeError('The dst_type should be np.float32 or np.uint8, '
f'but got {dst_type}')
if dst_type == np.uint8:
img = img.round()
else:
img /= 255.
return img.astype(dst_type)
The provided code snippet includes necessary dependencies for implementing the `ycbcr2bgr` function. Write a Python function `def ycbcr2bgr(img)` to solve the following problem:
Convert a YCbCr image to BGR image. The bgr version of ycbcr2rgb. It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`. In OpenCV, it implements a JPEG conversion. See more details in https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. Args: img (ndarray): The input image. It accepts: 1. np.uint8 type with range [0, 255]; 2. np.float32 type with range [0, 1]. Returns: ndarray: The converted BGR image. The output image has the same type and range as input image.
Here is the function:
def ycbcr2bgr(img):
"""Convert a YCbCr image to BGR image.
The bgr version of ycbcr2rgb.
It implements the ITU-R BT.601 conversion for standard-definition
television. See more details in
https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`.
In OpenCV, it implements a JPEG conversion. See more details in
https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
Args:
img (ndarray): The input image. It accepts:
1. np.uint8 type with range [0, 255];
2. np.float32 type with range [0, 1].
Returns:
ndarray: The converted BGR image. The output image has the same type
and range as input image.
"""
img_type = img.dtype
img = _convert_input_type_range(img) * 255
out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621],
[0.00791071, -0.00153632, 0],
[0, -0.00318811, 0.00625893]]) * 255.0 + [
-276.836, 135.576, -222.921
] # noqa: E126
out_img = _convert_output_type_range(out_img, img_type)
return out_img | Convert a YCbCr image to BGR image. The bgr version of ycbcr2rgb. It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`. In OpenCV, it implements a JPEG conversion. See more details in https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion. Args: img (ndarray): The input image. It accepts: 1. np.uint8 type with range [0, 255]; 2. np.float32 type with range [0, 1]. Returns: ndarray: The converted BGR image. The output image has the same type and range as input image. |
3,914 | import argparse
from os import path as osp
from basicsr.utils import scandir
from basicsr.utils.lmdb_util import make_lmdb_from_imgs
def prepare_keys(folder_path, suffix='png'):
def make_lmdb_from_imgs(data_path,
lmdb_path,
img_path_list,
keys,
batch=5000,
compress_level=1,
multiprocessing_read=False,
n_thread=40,
map_size=None):
def create_lmdb_for_reds():
# folder_path = './datasets/REDS/val/sharp_300'
# lmdb_path = './datasets/REDS/val/sharp_300.lmdb'
# img_path_list, keys = prepare_keys(folder_path, 'png')
# make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
#
# folder_path = './datasets/REDS/val/blur_300'
# lmdb_path = './datasets/REDS/val/blur_300.lmdb'
# img_path_list, keys = prepare_keys(folder_path, 'jpg')
# make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
folder_path = './datasets/REDS/train/train_sharp'
lmdb_path = './datasets/REDS/train/train_sharp.lmdb'
img_path_list, keys = prepare_keys(folder_path, 'png')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
folder_path = './datasets/REDS/train/train_blur_jpeg'
lmdb_path = './datasets/REDS/train/train_blur_jpeg.lmdb'
img_path_list, keys = prepare_keys(folder_path, 'jpg')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys) | null |
3,915 | import argparse
import sys
import cv2
import lmdb
from multiprocessing import Pool
from os import path as osp
from tqdm import tqdm
from basicsr.utils import scandir
from basicsr.utils.lmdb_util import make_lmdb_from_imgs
def prepare_keys(folder_path, suffix='png'):
"""Prepare image path list and keys for DIV2K dataset.
Args:
folder_path (str): Folder path.
Returns:
list[str]: Image path list.
list[str]: Key list.
"""
print('Reading image path list ...')
img_path_list = sorted(
list(scandir(folder_path, suffix=suffix, recursive=False)))
keys = [img_path.split('.{}'.format(suffix))[0] for img_path in sorted(img_path_list)]
return img_path_list, keys
# `read_img_worker` is the companion helper from basicsr.utils.lmdb_util;
# it is reproduced here so that `make_lmdb_from_imgs` below is self-contained.
def read_img_worker(path, key, compress_level):
    """Read an image and encode it to png bytes.
    Args:
        path (str): Image path.
        key (str): Image key.
        compress_level (int): Compress level when encoding images.
    Returns:
        str: Image key.
        byte: Image byte.
        tuple[int]: Image shape.
    """
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if img.ndim == 2:
        h, w = img.shape
        c = 1
    else:
        h, w, c = img.shape
    _, img_byte = cv2.imencode('.png', img,
                               [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
    return (key, img_byte, (h, w, c))
def make_lmdb_from_imgs(data_path,
lmdb_path,
img_path_list,
keys,
batch=5000,
compress_level=1,
multiprocessing_read=False,
n_thread=40,
map_size=None):
"""Make lmdb from images.
Contents of lmdb. The file structure is:
example.lmdb
├── data.mdb
├── lock.mdb
├── meta_info.txt
The data.mdb and lock.mdb are standard lmdb files and you can refer to
https://lmdb.readthedocs.io/en/release/ for more details.
The meta_info.txt is a specified txt file to record the meta information
of our datasets. It will be automatically created when preparing
datasets by our provided dataset tools.
Each line in the txt file records 1)image name (with extension),
2)image shape, and 3)compression level, separated by a white space.
For example, the meta information could be:
`000_00000000.png (720,1280,3) 1`, which means:
1) image name (with extension): 000_00000000.png;
2) image shape: (720,1280,3);
3) compression level: 1
We use the image name without extension as the lmdb key.
If `multiprocessing_read` is True, it will read all the images to memory
using multiprocessing. Thus, your server needs to have enough memory.
Args:
data_path (str): Data path for reading images.
lmdb_path (str): Lmdb save path.
img_path_list (list[str]): Image path list.
keys (list[str]): Used for lmdb keys.
batch (int): After processing batch images, lmdb commits.
Default: 5000.
compress_level (int): Compress level when encoding images. Default: 1.
multiprocessing_read (bool): Whether use multiprocessing to read all
the images to memory. Default: False.
n_thread (int): For multiprocessing.
map_size (int | None): Map size for lmdb env. If None, use the
estimated size from images. Default: None
"""
assert len(img_path_list) == len(keys), (
'img_path_list and keys should have the same length, '
f'but got {len(img_path_list)} and {len(keys)}')
print(f'Create lmdb for {data_path}, save to {lmdb_path}...')
print(f'Total images: {len(img_path_list)}')
if not lmdb_path.endswith('.lmdb'):
raise ValueError("lmdb_path must end with '.lmdb'.")
if osp.exists(lmdb_path):
print(f'Folder {lmdb_path} already exists. Exit.')
sys.exit(1)
if multiprocessing_read:
# read all the images to memory (multiprocessing)
dataset = {} # use dict to keep the order for multiprocessing
shapes = {}
print(f'Read images with multiprocessing, #thread: {n_thread} ...')
pbar = tqdm(total=len(img_path_list), unit='image')
def callback(arg):
"""get the image data and update pbar."""
key, dataset[key], shapes[key] = arg
pbar.update(1)
pbar.set_description(f'Read {key}')
pool = Pool(n_thread)
for path, key in zip(img_path_list, keys):
pool.apply_async(
read_img_worker,
args=(osp.join(data_path, path), key, compress_level),
callback=callback)
pool.close()
pool.join()
pbar.close()
print(f'Finish reading {len(img_path_list)} images.')
# create lmdb environment
if map_size is None:
# obtain data size for one image
img = cv2.imread(
osp.join(data_path, img_path_list[0]), cv2.IMREAD_UNCHANGED)
_, img_byte = cv2.imencode(
'.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
data_size_per_img = img_byte.nbytes
print('Data size per image is: ', data_size_per_img)
data_size = data_size_per_img * len(img_path_list)
map_size = data_size * 10
env = lmdb.open(lmdb_path, map_size=map_size)
# write data to lmdb
pbar = tqdm(total=len(img_path_list), unit='chunk')
txn = env.begin(write=True)
txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w')
for idx, (path, key) in enumerate(zip(img_path_list, keys)):
pbar.update(1)
pbar.set_description(f'Write {key}')
key_byte = key.encode('ascii')
if multiprocessing_read:
img_byte = dataset[key]
h, w, c = shapes[key]
else:
_, img_byte, img_shape = read_img_worker(
osp.join(data_path, path), key, compress_level)
h, w, c = img_shape
txn.put(key_byte, img_byte)
# write meta information
txt_file.write(f'{key}.png ({h},{w},{c}) {compress_level}\n')
if idx % batch == 0:
txn.commit()
txn = env.begin(write=True)
pbar.close()
txn.commit()
env.close()
txt_file.close()
print('\nFinish writing lmdb.')
def create_lmdb_for_gopro():
folder_path = './datasets/GoPro/train/blur_crops'
lmdb_path = './datasets/GoPro/train/blur_crops.lmdb'
img_path_list, keys = prepare_keys(folder_path, 'png')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
folder_path = './datasets/GoPro/train/sharp_crops'
lmdb_path = './datasets/GoPro/train/sharp_crops.lmdb'
img_path_list, keys = prepare_keys(folder_path, 'png')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
# folder_path = './datasets/GoPro/test/target'
# lmdb_path = './datasets/GoPro/test/target.lmdb'
# img_path_list, keys = prepare_keys(folder_path, 'png')
# make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
# folder_path = './datasets/GoPro/test/input'
# lmdb_path = './datasets/GoPro/test/input.lmdb'
# img_path_list, keys = prepare_keys(folder_path, 'png')
# make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys) | null |
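These create_lmdb_* helpers are typically wired to a small CLI entry point; a hedged sketch (the --dataset flag and its option strings are assumptions, not from this repo):
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='gopro',
                        help="Options: 'gopro', 'rain13k'.")
    args = parser.parse_args()
    if args.dataset.lower() == 'gopro':
        create_lmdb_for_gopro()
    elif args.dataset.lower() == 'rain13k':
        create_lmdb_for_rain13k()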
3,916 | import argparse
import sys
import cv2
import lmdb
from multiprocessing import Pool
from os import path as osp
from tqdm import tqdm
from basicsr.utils import scandir
from basicsr.utils.lmdb_util import make_lmdb_from_imgs
def prepare_keys(folder_path, suffix='png'):
"""Prepare image path list and keys for DIV2K dataset.
Args:
folder_path (str): Folder path.
Returns:
list[str]: Image path list.
list[str]: Key list.
"""
print('Reading image path list ...')
img_path_list = sorted(
list(scandir(folder_path, suffix=suffix, recursive=False)))
keys = [img_path.split('.{}'.format(suffix))[0] for img_path in sorted(img_path_list)]
return img_path_list, keys
# `read_img_worker` is the companion helper from basicsr.utils.lmdb_util;
# it is reproduced here so that `make_lmdb_from_imgs` below is self-contained.
def read_img_worker(path, key, compress_level):
    """Read an image and encode it to png bytes.
    Args:
        path (str): Image path.
        key (str): Image key.
        compress_level (int): Compress level when encoding images.
    Returns:
        str: Image key.
        byte: Image byte.
        tuple[int]: Image shape.
    """
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if img.ndim == 2:
        h, w = img.shape
        c = 1
    else:
        h, w, c = img.shape
    _, img_byte = cv2.imencode('.png', img,
                               [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
    return (key, img_byte, (h, w, c))
def make_lmdb_from_imgs(data_path,
lmdb_path,
img_path_list,
keys,
batch=5000,
compress_level=1,
multiprocessing_read=False,
n_thread=40,
map_size=None):
"""Make lmdb from images.
Contents of lmdb. The file structure is:
example.lmdb
├── data.mdb
├── lock.mdb
├── meta_info.txt
The data.mdb and lock.mdb are standard lmdb files and you can refer to
https://lmdb.readthedocs.io/en/release/ for more details.
The meta_info.txt is a specified txt file to record the meta information
of our datasets. It will be automatically created when preparing
datasets by our provided dataset tools.
Each line in the txt file records 1)image name (with extension),
2)image shape, and 3)compression level, separated by a white space.
For example, the meta information could be:
`000_00000000.png (720,1280,3) 1`, which means:
1) image name (with extension): 000_00000000.png;
2) image shape: (720,1280,3);
3) compression level: 1
We use the image name without extension as the lmdb key.
If `multiprocessing_read` is True, it will read all the images to memory
using multiprocessing. Thus, your server needs to have enough memory.
Args:
data_path (str): Data path for reading images.
lmdb_path (str): Lmdb save path.
img_path_list (list[str]): Image path list.
keys (list[str]): Used for lmdb keys.
batch (int): After processing batch images, lmdb commits.
Default: 5000.
compress_level (int): Compress level when encoding images. Default: 1.
multiprocessing_read (bool): Whether use multiprocessing to read all
the images to memory. Default: False.
n_thread (int): For multiprocessing.
map_size (int | None): Map size for lmdb env. If None, use the
estimated size from images. Default: None
"""
assert len(img_path_list) == len(keys), (
'img_path_list and keys should have the same length, '
f'but got {len(img_path_list)} and {len(keys)}')
print(f'Create lmdb for {data_path}, save to {lmdb_path}...')
print(f'Total images: {len(img_path_list)}')
if not lmdb_path.endswith('.lmdb'):
raise ValueError("lmdb_path must end with '.lmdb'.")
if osp.exists(lmdb_path):
print(f'Folder {lmdb_path} already exists. Exit.')
sys.exit(1)
if multiprocessing_read:
# read all the images to memory (multiprocessing)
dataset = {} # use dict to keep the order for multiprocessing
shapes = {}
print(f'Read images with multiprocessing, #thread: {n_thread} ...')
pbar = tqdm(total=len(img_path_list), unit='image')
def callback(arg):
"""get the image data and update pbar."""
key, dataset[key], shapes[key] = arg
pbar.update(1)
pbar.set_description(f'Read {key}')
pool = Pool(n_thread)
for path, key in zip(img_path_list, keys):
pool.apply_async(
read_img_worker,
args=(osp.join(data_path, path), key, compress_level),
callback=callback)
pool.close()
pool.join()
pbar.close()
print(f'Finish reading {len(img_path_list)} images.')
# create lmdb environment
if map_size is None:
# obtain data size for one image
img = cv2.imread(
osp.join(data_path, img_path_list[0]), cv2.IMREAD_UNCHANGED)
_, img_byte = cv2.imencode(
'.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
data_size_per_img = img_byte.nbytes
print('Data size per image is: ', data_size_per_img)
data_size = data_size_per_img * len(img_path_list)
map_size = data_size * 10
env = lmdb.open(lmdb_path, map_size=map_size)
# write data to lmdb
pbar = tqdm(total=len(img_path_list), unit='chunk')
txn = env.begin(write=True)
txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w')
for idx, (path, key) in enumerate(zip(img_path_list, keys)):
pbar.update(1)
pbar.set_description(f'Write {key}')
key_byte = key.encode('ascii')
if multiprocessing_read:
img_byte = dataset[key]
h, w, c = shapes[key]
else:
_, img_byte, img_shape = read_img_worker(
osp.join(data_path, path), key, compress_level)
h, w, c = img_shape
txn.put(key_byte, img_byte)
# write meta information
txt_file.write(f'{key}.png ({h},{w},{c}) {compress_level}\n')
if idx % batch == 0:
txn.commit()
txn = env.begin(write=True)
pbar.close()
txn.commit()
env.close()
txt_file.close()
print('\nFinish writing lmdb.')
def create_lmdb_for_rain13k():
folder_path = './datasets/Rain13k/train/input'
lmdb_path = './datasets/Rain13k/train/input.lmdb'
img_path_list, keys = prepare_keys(folder_path, 'jpg')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
folder_path = './datasets/Rain13k/train/target'
lmdb_path = './datasets/Rain13k/train/target.lmdb'
img_path_list, keys = prepare_keys(folder_path, 'jpg')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys) | null |
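For reference, a minimal sketch of reading one entry back from an lmdb produced by make_lmdb_from_imgs; the lmdb path and the key ('norain-1') are hypothetical, and the stored bytes are png-encoded regardless of the source suffix:
import cv2
import lmdb
import numpy as np

# Open the lmdb created above (read-only).
env = lmdb.open('./datasets/Rain13k/train/input.lmdb',
                readonly=True, lock=False, readahead=False)
with env.begin(write=False) as txn:
    # Keys are image names without extension, encoded as ascii.
    img_byte = txn.get('norain-1'.encode('ascii'))  # hypothetical key
# Decode the stored png bytes back into an image array.
img = cv2.imdecode(np.frombuffer(img_byte, np.uint8), cv2.IMREAD_UNCHANGED)
env.close()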
3,917 | import argparse
import cv2
import lmdb
import sys
from multiprocessing import Pool
from os import path as osp
from tqdm import tqdm
from basicsr.utils import scandir
from basicsr.utils.lmdb_util import make_lmdb_from_imgs
def prepare_keys(folder_path, suffix='png'):
"""Prepare image path list and keys for DIV2K dataset.
Args:
folder_path (str): Folder path.
Returns:
list[str]: Image path list.
list[str]: Key list.
"""
print('Reading image path list ...')
img_path_list = sorted(
list(scandir(folder_path, suffix=suffix, recursive=False)))
keys = [img_path.split('.{}'.format(suffix))[0] for img_path in sorted(img_path_list)]
return img_path_list, keys
def make_lmdb_from_imgs(data_path,
lmdb_path,
img_path_list,
keys,
batch=5000,
compress_level=1,
multiprocessing_read=False,
n_thread=40,
map_size=None):
"""Make lmdb from images.
Contents of lmdb. The file structure is:
example.lmdb
├── data.mdb
├── lock.mdb
├── meta_info.txt
The data.mdb and lock.mdb are standard lmdb files and you can refer to
https://lmdb.readthedocs.io/en/release/ for more details.
The meta_info.txt is a specified txt file to record the meta information
of our datasets. It will be automatically created when preparing
datasets by our provided dataset tools.
Each line in the txt file records 1)image name (with extension),
2)image shape, and 3)compression level, separated by a white space.
For example, the meta information could be:
`000_00000000.png (720,1280,3) 1`, which means:
1) image name (with extension): 000_00000000.png;
2) image shape: (720,1280,3);
3) compression level: 1
We use the image name without extension as the lmdb key.
If `multiprocessing_read` is True, it will read all the images to memory
using multiprocessing. Thus, your server needs to have enough memory.
Args:
data_path (str): Data path for reading images.
lmdb_path (str): Lmdb save path.
        img_path_list (list[str]): Image path list.
        keys (list[str]): Used for lmdb keys.
batch (int): After processing batch images, lmdb commits.
Default: 5000.
compress_level (int): Compress level when encoding images. Default: 1.
multiprocessing_read (bool): Whether use multiprocessing to read all
the images to memory. Default: False.
        n_thread (int): Number of worker processes used when reading
            images with multiprocessing. Default: 40.
map_size (int | None): Map size for lmdb env. If None, use the
estimated size from images. Default: None
"""
assert len(img_path_list) == len(keys), (
'img_path_list and keys should have the same length, '
f'but got {len(img_path_list)} and {len(keys)}')
print(f'Create lmdb for {data_path}, save to {lmdb_path}...')
print(f'Total images: {len(img_path_list)}')
if not lmdb_path.endswith('.lmdb'):
raise ValueError("lmdb_path must end with '.lmdb'.")
if osp.exists(lmdb_path):
print(f'Folder {lmdb_path} already exists. Exit.')
sys.exit(1)
if multiprocessing_read:
# read all the images to memory (multiprocessing)
dataset = {} # use dict to keep the order for multiprocessing
shapes = {}
print(f'Read images with multiprocessing, #thread: {n_thread} ...')
pbar = tqdm(total=len(img_path_list), unit='image')
def callback(arg):
"""get the image data and update pbar."""
key, dataset[key], shapes[key] = arg
pbar.update(1)
pbar.set_description(f'Read {key}')
pool = Pool(n_thread)
for path, key in zip(img_path_list, keys):
pool.apply_async(
read_img_worker,
args=(osp.join(data_path, path), key, compress_level),
callback=callback)
pool.close()
pool.join()
pbar.close()
print(f'Finish reading {len(img_path_list)} images.')
# create lmdb environment
if map_size is None:
# obtain data size for one image
img = cv2.imread(
osp.join(data_path, img_path_list[0]), cv2.IMREAD_UNCHANGED)
_, img_byte = cv2.imencode(
'.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level])
data_size_per_img = img_byte.nbytes
print('Data size per image is: ', data_size_per_img)
data_size = data_size_per_img * len(img_path_list)
map_size = data_size * 10
env = lmdb.open(lmdb_path, map_size=map_size)
# write data to lmdb
pbar = tqdm(total=len(img_path_list), unit='chunk')
txn = env.begin(write=True)
txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w')
for idx, (path, key) in enumerate(zip(img_path_list, keys)):
pbar.update(1)
pbar.set_description(f'Write {key}')
key_byte = key.encode('ascii')
if multiprocessing_read:
img_byte = dataset[key]
h, w, c = shapes[key]
else:
_, img_byte, img_shape = read_img_worker(
osp.join(data_path, path), key, compress_level)
h, w, c = img_shape
txn.put(key_byte, img_byte)
# write meta information
txt_file.write(f'{key}.png ({h},{w},{c}) {compress_level}\n')
if idx % batch == 0:
txn.commit()
txn = env.begin(write=True)
pbar.close()
txn.commit()
env.close()
txt_file.close()
print('\nFinish writing lmdb.')
The provided code snippet includes necessary dependencies for implementing the `create_lmdb_for_SIDD` function. Write a Python function `def create_lmdb_for_SIDD()` to solve the following problem:
folder_path = './datasets/SIDD/val/input_crops'
lmdb_path = './datasets/SIDD/val/input_crops.lmdb'
mat_path = './datasets/SIDD/ValidationNoisyBlocksSrgb.mat'
if not osp.exists(folder_path):
    os.makedirs(folder_path)
assert osp.exists(mat_path)
data = scio.loadmat(mat_path)['ValidationNoisyBlocksSrgb']
N, B, H, W, C = data.shape
data = data.reshape(N*B, H, W, C)
for i in tqdm(range(N*B)):
    cv2.imwrite(osp.join(folder_path, 'ValidationBlocksSrgb_{}.png'.format(i)), cv2.cvtColor(data[i, ...], cv2.COLOR_RGB2BGR))
img_path_list, keys = prepare_keys(folder_path, 'png')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
folder_path = './datasets/SIDD/val/gt_crops'
lmdb_path = './datasets/SIDD/val/gt_crops.lmdb'
mat_path = './datasets/SIDD/ValidationGtBlocksSrgb.mat'
if not osp.exists(folder_path):
    os.makedirs(folder_path)
assert osp.exists(mat_path)
data = scio.loadmat(mat_path)['ValidationGtBlocksSrgb']
N, B, H, W, C = data.shape
data = data.reshape(N*B, H, W, C)
for i in tqdm(range(N*B)):
    cv2.imwrite(osp.join(folder_path, 'ValidationBlocksSrgb_{}.png'.format(i)), cv2.cvtColor(data[i, ...], cv2.COLOR_RGB2BGR))
img_path_list, keys = prepare_keys(folder_path, 'png')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
Here is the function:
def create_lmdb_for_SIDD():
folder_path = './datasets/SIDD/train/input_crops'
lmdb_path = './datasets/SIDD/train/input_crops.lmdb'
img_path_list, keys = prepare_keys(folder_path, 'PNG')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
folder_path = './datasets/SIDD/train/gt_crops'
lmdb_path = './datasets/SIDD/train/gt_crops.lmdb'
img_path_list, keys = prepare_keys(folder_path, 'PNG')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
    # for val
'''
folder_path = './datasets/SIDD/val/input_crops'
lmdb_path = './datasets/SIDD/val/input_crops.lmdb'
mat_path = './datasets/SIDD/ValidationNoisyBlocksSrgb.mat'
if not osp.exists(folder_path):
os.makedirs(folder_path)
assert osp.exists(mat_path)
data = scio.loadmat(mat_path)['ValidationNoisyBlocksSrgb']
    N, B, H, W, C = data.shape
data = data.reshape(N*B, H, W, C)
for i in tqdm(range(N*B)):
cv2.imwrite(osp.join(folder_path, 'ValidationBlocksSrgb_{}.png'.format(i)), cv2.cvtColor(data[i,...], cv2.COLOR_RGB2BGR))
img_path_list, keys = prepare_keys(folder_path, 'png')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
folder_path = './datasets/SIDD/val/gt_crops'
lmdb_path = './datasets/SIDD/val/gt_crops.lmdb'
mat_path = './datasets/SIDD/ValidationGtBlocksSrgb.mat'
if not osp.exists(folder_path):
os.makedirs(folder_path)
assert osp.exists(mat_path)
data = scio.loadmat(mat_path)['ValidationGtBlocksSrgb']
    N, B, H, W, C = data.shape
data = data.reshape(N*B, H, W, C)
for i in tqdm(range(N*B)):
cv2.imwrite(osp.join(folder_path, 'ValidationBlocksSrgb_{}.png'.format(i)), cv2.cvtColor(data[i,...], cv2.COLOR_RGB2BGR))
img_path_list, keys = prepare_keys(folder_path, 'png')
make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys)
''' | folder_path = './datasets/SIDD/val/input_crops' lmdb_path = './datasets/SIDD/val/input_crops.lmdb' mat_path = './datasets/SIDD/ValidationNoisyBlocksSrgb.mat' if not osp.exists(folder_path): os.makedirs(folder_path) assert osp.exists(mat_path) data = scio.loadmat(mat_path)['ValidationNoisyBlocksSrgb'] N, B, H ,W, C = data.shape data = data.reshape(N*B, H, W, C) for i in tqdm(range(N*B)): cv2.imwrite(osp.join(folder_path, 'ValidationBlocksSrgb_{}.png'.format(i)), cv2.cvtColor(data[i,...], cv2.COLOR_RGB2BGR)) img_path_list, keys = prepare_keys(folder_path, 'png') make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys) folder_path = './datasets/SIDD/val/gt_crops' lmdb_path = './datasets/SIDD/val/gt_crops.lmdb' mat_path = './datasets/SIDD/ValidationGtBlocksSrgb.mat' if not osp.exists(folder_path): os.makedirs(folder_path) assert osp.exists(mat_path) data = scio.loadmat(mat_path)['ValidationGtBlocksSrgb'] N, B, H ,W, C = data.shape data = data.reshape(N*B, H, W, C) for i in tqdm(range(N*B)): cv2.imwrite(osp.join(folder_path, 'ValidationBlocksSrgb_{}.png'.format(i)), cv2.cvtColor(data[i,...], cv2.COLOR_RGB2BGR)) img_path_list, keys = prepare_keys(folder_path, 'png') make_lmdb_from_imgs(folder_path, lmdb_path, img_path_list, keys) |
3,918 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
The provided code snippet includes necessary dependencies for implementing the `img2tensor` function. Write a Python function `def img2tensor(imgs, bgr2rgb=True, float32=True)` to solve the following problem:
Numpy array to tensor. Args: imgs (list[ndarray] | ndarray): Input images. bgr2rgb (bool): Whether to change bgr to rgb. float32 (bool): Whether to change to float32. Returns: list[tensor] | tensor: Tensor images. If returned results only have one element, just return tensor.
Here is the function:
def img2tensor(imgs, bgr2rgb=True, float32=True):
"""Numpy array to tensor.
Args:
imgs (list[ndarray] | ndarray): Input images.
bgr2rgb (bool): Whether to change bgr to rgb.
float32 (bool): Whether to change to float32.
Returns:
list[tensor] | tensor: Tensor images. If returned results only have
one element, just return tensor.
"""
def _totensor(img, bgr2rgb, float32):
if img.shape[2] == 3 and bgr2rgb:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = torch.from_numpy(img.transpose(2, 0, 1))
if float32:
img = img.float()
return img
if isinstance(imgs, list):
return [_totensor(img, bgr2rgb, float32) for img in imgs]
else:
return _totensor(imgs, bgr2rgb, float32) | Numpy array to tensor. Args: imgs (list[ndarray] | ndarray): Input images. bgr2rgb (bool): Whether to change bgr to rgb. float32 (bool): Whether to change to float32. Returns: list[tensor] | tensor: Tensor images. If returned results only have one element, just return tensor. |
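A short usage sketch for img2tensor; the input is a synthetic HWC float image standing in for a cv2-loaded BGR array:
import numpy as np

img = np.random.rand(64, 64, 3).astype(np.float32)  # HWC, BGR order
t = img2tensor(img, bgr2rgb=True, float32=True)
print(t.shape)  # torch.Size([3, 64, 64]): channels first, RGB order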
3,919 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
The provided code snippet includes necessary dependencies for implementing the `tensor2img` function. Write a Python function `def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1))` to solve the following problem:
Convert torch Tensors into image numpy arrays. After clamping to [min, max], values will be normalized to [0, 1]. Args: tensor (Tensor or list[Tensor]): Accept shapes: 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W); 2) 3D Tensor of shape (3/1 x H x W); 3) 2D Tensor of shape (H x W). Tensor channel should be in RGB order. rgb2bgr (bool): Whether to change rgb to bgr. out_type (numpy type): output types. If ``np.uint8``, transform outputs to uint8 type with range [0, 255]; otherwise, float type with range [0, 1]. Default: ``np.uint8``. min_max (tuple[int]): min and max values for clamp. Returns: (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of shape (H x W). The channel order is BGR.
Here is the function:
def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):
"""Convert torch Tensors into image numpy arrays.
After clamping to [min, max], values will be normalized to [0, 1].
Args:
tensor (Tensor or list[Tensor]): Accept shapes:
1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);
2) 3D Tensor of shape (3/1 x H x W);
3) 2D Tensor of shape (H x W).
Tensor channel should be in RGB order.
rgb2bgr (bool): Whether to change rgb to bgr.
out_type (numpy type): output types. If ``np.uint8``, transform outputs
to uint8 type with range [0, 255]; otherwise, float type with
range [0, 1]. Default: ``np.uint8``.
min_max (tuple[int]): min and max values for clamp.
Returns:
(Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of
shape (H x W). The channel order is BGR.
"""
if not (torch.is_tensor(tensor) or
(isinstance(tensor, list)
and all(torch.is_tensor(t) for t in tensor))):
raise TypeError(
f'tensor or list of tensors expected, got {type(tensor)}')
if torch.is_tensor(tensor):
tensor = [tensor]
result = []
for _tensor in tensor:
_tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)
_tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
n_dim = _tensor.dim()
if n_dim == 4:
img_np = make_grid(
_tensor, nrow=int(math.sqrt(_tensor.size(0))),
normalize=False).numpy()
img_np = img_np.transpose(1, 2, 0)
if rgb2bgr:
img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
elif n_dim == 3:
img_np = _tensor.numpy()
img_np = img_np.transpose(1, 2, 0)
if img_np.shape[2] == 1: # gray image
img_np = np.squeeze(img_np, axis=2)
elif img_np.shape[2] == 3:
if rgb2bgr:
img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)
elif n_dim == 2:
img_np = _tensor.numpy()
else:
raise TypeError('Only support 4D, 3D or 2D tensor. '
f'But received with dimension: {n_dim}')
if out_type == np.uint8:
            # Unlike MATLAB, numpy.uint8() WILL NOT round by default.
img_np = (img_np * 255.0).round()
img_np = img_np.astype(out_type)
result.append(img_np)
if len(result) == 1:
result = result[0]
return result | Convert torch Tensors into image numpy arrays. After clamping to [min, max], values will be normalized to [0, 1]. Args: tensor (Tensor or list[Tensor]): Accept shapes: 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W); 2) 3D Tensor of shape (3/1 x H x W); 3) 2D Tensor of shape (H x W). Tensor channel should be in RGB order. rgb2bgr (bool): Whether to change rgb to bgr. out_type (numpy type): output types. If ``np.uint8``, transform outputs to uint8 type with range [0, 255]; otherwise, float type with range [0, 1]. Default: ``np.uint8``. min_max (tuple[int]): min and max values for clamp. Returns: (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of shape (H x W). The channel order is BGR. |
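A round-trip sketch pairing CHW tensors with tensor2img; the tensor is synthetic:
import torch

t = torch.rand(1, 3, 32, 32)       # 4D mini-batch in [0, 1], RGB order
img = tensor2img(t, rgb2bgr=True)  # the batch dim of 1 is squeezed away
print(img.shape, img.dtype)        # (32, 32, 3) uint8, BGR order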
3,920 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
The provided code snippet includes necessary dependencies for implementing the `imfrombytes` function. Write a Python function `def imfrombytes(content, flag='color', float32=False)` to solve the following problem:
Read an image from bytes. Args: content (bytes): Image bytes got from files or other streams. flag (str): Flags specifying the color type of a loaded image, candidates are `color`, `grayscale` and `unchanged`. float32 (bool): Whether to change to float32. If True, will also norm to [0, 1]. Default: False. Returns: ndarray: Loaded image array.
Here is the function:
def imfrombytes(content, flag='color', float32=False):
"""Read an image from bytes.
Args:
content (bytes): Image bytes got from files or other streams.
flag (str): Flags specifying the color type of a loaded image,
candidates are `color`, `grayscale` and `unchanged`.
        float32 (bool): Whether to change to float32. If True, will also norm
to [0, 1]. Default: False.
Returns:
ndarray: Loaded image array.
"""
img_np = np.frombuffer(content, np.uint8)
imread_flags = {
'color': cv2.IMREAD_COLOR,
'grayscale': cv2.IMREAD_GRAYSCALE,
'unchanged': cv2.IMREAD_UNCHANGED
}
    img = cv2.imdecode(img_np, imread_flags[flag])
    if img is None:
        raise ValueError('Failed to decode image from bytes.')
if float32:
img = img.astype(np.float32) / 255.
return img | Read an image from bytes. Args: content (bytes): Image bytes got from files or other streams. flag (str): Flags specifying the color type of a loaded image, candidates are `color`, `grayscale` and `unchanged`. float32 (bool): Whether to change to float32., If True, will also norm to [0, 1]. Default: False. Returns: ndarray: Loaded image array. |
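A usage sketch for imfrombytes; 'example.png' is a placeholder path:
with open('example.png', 'rb') as f:  # hypothetical file
    content = f.read()
img = imfrombytes(content, flag='color', float32=True)  # HWC, BGR, in [0, 1]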
3,921 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
def padding(img_lq, img_gt, gt_size):
h, w, _ = img_lq.shape
h_pad = max(0, gt_size - h)
w_pad = max(0, gt_size - w)
if h_pad == 0 and w_pad == 0:
return img_lq, img_gt
img_lq = cv2.copyMakeBorder(img_lq, 0, h_pad, 0, w_pad, cv2.BORDER_REFLECT)
img_gt = cv2.copyMakeBorder(img_gt, 0, h_pad, 0, w_pad, cv2.BORDER_REFLECT)
# print('img_lq', img_lq.shape, img_gt.shape)
return img_lq, img_gt | null |
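A quick sketch of what padding does to undersized crops (shapes are illustrative):
import numpy as np

img_lq = np.zeros((100, 120, 3), np.float32)
img_gt = np.zeros((100, 120, 3), np.float32)
# Reflect-pads the bottom/right so both images reach the 128x128 gt_size.
img_lq, img_gt = padding(img_lq, img_gt, gt_size=128)
print(img_lq.shape)  # (128, 128, 3)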
3,922 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
The provided code snippet includes necessary dependencies for implementing the `imwrite` function. Write a Python function `def imwrite(img, file_path, params=None, auto_mkdir=True)` to solve the following problem:
Write image to file. Args: img (ndarray): Image array to be written. file_path (str): Image file path. params (None or list): Same as opencv's :func:`imwrite` interface. auto_mkdir (bool): If the parent folder of `file_path` does not exist, whether to create it automatically. Returns: bool: Successful or not.
Here is the function:
def imwrite(img, file_path, params=None, auto_mkdir=True):
"""Write image to file.
Args:
img (ndarray): Image array to be written.
file_path (str): Image file path.
params (None or list): Same as opencv's :func:`imwrite` interface.
auto_mkdir (bool): If the parent folder of `file_path` does not exist,
whether to create it automatically.
Returns:
bool: Successful or not.
"""
if auto_mkdir:
dir_name = os.path.abspath(os.path.dirname(file_path))
os.makedirs(dir_name, exist_ok=True)
return cv2.imwrite(file_path, img, params) | Write image to file. Args: img (ndarray): Image array to be written. file_path (str): Image file path. params (None or list): Same as opencv's :func:`imwrite` interface. auto_mkdir (bool): If the parent folder of `file_path` does not exist, whether to create it automatically. Returns: bool: Successful or not. |
3,923 | import cv2
import math
import numpy as np
import os
import torch
from torchvision.utils import make_grid
The provided code snippet includes necessary dependencies for implementing the `crop_border` function. Write a Python function `def crop_border(imgs, crop_border)` to solve the following problem:
Crop borders of images. Args: imgs (list[ndarray] | ndarray): Images with shape (h, w, c). crop_border (int): Crop border for each end of height and width. Returns: list[ndarray]: Cropped images.
Here is the function:
def crop_border(imgs, crop_border):
"""Crop borders of images.
Args:
imgs (list[ndarray] | ndarray): Images with shape (h, w, c).
        crop_border (int): Crop border for each end of height and width.
Returns:
list[ndarray]: Cropped images.
"""
if crop_border == 0:
return imgs
else:
if isinstance(imgs, list):
return [
v[crop_border:-crop_border, crop_border:-crop_border, ...]
for v in imgs
]
else:
return imgs[crop_border:-crop_border, crop_border:-crop_border,
...] | Crop borders of images. Args: imgs (list[ndarray] | ndarray): Images with shape (h, w, c). crop_border (int): Crop border for each end of height and weight. Returns: list[ndarray]: Cropped images. |
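A usage sketch for crop_border (synthetic input):
import numpy as np

img = np.zeros((64, 64, 3), np.uint8)
print(crop_border(img, crop_border=4).shape)  # (56, 56, 3): 4 px off each side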
3,924 | import numpy as np
import os
import random
import time
import torch
from os import path as osp
from .dist_util import master_only
from .logger import get_root_logger
The provided code snippet includes necessary dependencies for implementing the `set_random_seed` function. Write a Python function `def set_random_seed(seed)` to solve the following problem:
Set random seeds.
Here is the function:
def set_random_seed(seed):
"""Set random seeds."""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) | Set random seeds. |
3,925 | import numpy as np
import os
import random
import time
import torch
from os import path as osp
from .dist_util import master_only
from .logger import get_root_logger
def mkdir_and_rename(path):
"""mkdirs. If path exists, rename it with timestamp and create a new one.
Args:
path (str): Folder path.
"""
if osp.exists(path):
new_name = path + '_archived_' + get_time_str()
print(f'Path already exists. Rename it to {new_name}', flush=True)
os.rename(path, new_name)
os.makedirs(path, exist_ok=True)
The provided code snippet includes necessary dependencies for implementing the `make_exp_dirs` function. Write a Python function `def make_exp_dirs(opt)` to solve the following problem:
Make dirs for experiments.
Here is the function:
def make_exp_dirs(opt):
"""Make dirs for experiments."""
path_opt = opt['path'].copy()
if opt['is_train']:
mkdir_and_rename(path_opt.pop('experiments_root'))
else:
mkdir_and_rename(path_opt.pop('results_root'))
for key, path in path_opt.items():
        if ('strict_load' not in key) and ('pretrain_network' not in key) \
                and ('resume' not in key):
os.makedirs(path, exist_ok=True) | Make dirs for experiments. |
3,926 | import numpy as np
import os
import random
import time
import torch
from os import path as osp
from .dist_util import master_only
from .logger import get_root_logger
def scandir(dir_path, suffix=None, recursive=False, full_path=False):
"""Scan a directory to find the interested files.
Args:
dir_path (str): Path of the directory.
suffix (str | tuple(str), optional): File suffix that we are
interested in. Default: None.
recursive (bool, optional): If set to True, recursively scan the
directory. Default: False.
full_path (bool, optional): If set to True, include the dir_path.
Default: False.
Returns:
        A generator for all the interested files with relative paths.
"""
if (suffix is not None) and not isinstance(suffix, (str, tuple)):
raise TypeError('"suffix" must be a string or tuple of strings')
root = dir_path
def _scandir(dir_path, suffix, recursive):
for entry in os.scandir(dir_path):
if not entry.name.startswith('.') and entry.is_file():
if full_path:
return_path = entry.path
else:
return_path = osp.relpath(entry.path, root)
if suffix is None:
yield return_path
elif return_path.endswith(suffix):
yield return_path
else:
if recursive:
yield from _scandir(
entry.path, suffix=suffix, recursive=recursive)
else:
continue
return _scandir(dir_path, suffix=suffix, recursive=recursive)
The provided code snippet includes necessary dependencies for implementing the `scandir_SIDD` function. Write a Python function `def scandir_SIDD(dir_path, keywords=None, recursive=False, full_path=False)` to solve the following problem:
Scan a directory to find the interested files. Args: dir_path (str): Path of the directory. keywords (str | tuple(str), optional): File keywords that we are interested in. Default: None. recursive (bool, optional): If set to True, recursively scan the directory. Default: False. full_path (bool, optional): If set to True, include the dir_path. Default: False. Returns: A generator for all the interested files with relative paths.
Here is the function:
def scandir_SIDD(dir_path, keywords=None, recursive=False, full_path=False):
"""Scan a directory to find the interested files.
Args:
dir_path (str): Path of the directory.
keywords (str | tuple(str), optional): File keywords that we are
interested in. Default: None.
recursive (bool, optional): If set to True, recursively scan the
directory. Default: False.
full_path (bool, optional): If set to True, include the dir_path.
Default: False.
Returns:
        A generator for all the interested files with relative paths.
"""
if (keywords is not None) and not isinstance(keywords, (str, tuple)):
raise TypeError('"keywords" must be a string or tuple of strings')
root = dir_path
def _scandir(dir_path, keywords, recursive):
for entry in os.scandir(dir_path):
if not entry.name.startswith('.') and entry.is_file():
if full_path:
return_path = entry.path
else:
return_path = osp.relpath(entry.path, root)
if keywords is None:
yield return_path
                elif return_path.find(keywords) >= 0:
yield return_path
else:
if recursive:
yield from _scandir(
entry.path, keywords=keywords, recursive=recursive)
else:
continue
return _scandir(dir_path, keywords=keywords, recursive=recursive) | Scan a directory to find the interested files. Args: dir_path (str): Path of the directory. keywords (str | tuple(str), optional): File keywords that we are interested in. Default: None. recursive (bool, optional): If set to True, recursively scan the directory. Default: False. full_path (bool, optional): If set to True, include the dir_path. Default: False. Returns: A generator for all the interested files with relative pathes. |
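A usage sketch for scandir_SIDD; the folder and the 'GT_SRGB' keyword are assumptions matching the SIDD naming scheme:
for path in scandir_SIDD('./datasets/SIDD/Data', keywords='GT_SRGB',
                         recursive=True, full_path=True):
    print(path)  # every file whose relative path contains the keyword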
3,927 | import numpy as np
import os
import random
import time
import torch
from os import path as osp
from .dist_util import master_only
from .logger import get_root_logger
def get_root_logger(logger_name='basicsr',
log_level=logging.INFO,
log_file=None):
"""Get the root logger.
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added.
Args:
logger_name (str): root logger name. Default: 'basicsr'.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the root logger.
log_level (int): The root logger level. Note that only the process of
rank 0 is affected, while other processes will set the level to
"Error" and be silent most of the time.
Returns:
logging.Logger: The root logger.
"""
logger = logging.getLogger(logger_name)
# if the logger has been initialized, just return it
if logger.hasHandlers():
return logger
format_str = '%(asctime)s %(levelname)s: %(message)s'
logging.basicConfig(format=format_str, level=log_level)
rank, _ = get_dist_info()
if rank != 0:
logger.setLevel('ERROR')
elif log_file is not None:
file_handler = logging.FileHandler(log_file, 'w')
file_handler.setFormatter(logging.Formatter(format_str))
file_handler.setLevel(log_level)
logger.addHandler(file_handler)
return logger
The provided code snippet includes necessary dependencies for implementing the `check_resume` function. Write a Python function `def check_resume(opt, resume_iter)` to solve the following problem:
Check resume states and pretrain_network paths. Args: opt (dict): Options. resume_iter (int): Resume iteration.
Here is the function:
def check_resume(opt, resume_iter):
"""Check resume states and pretrain_network paths.
Args:
opt (dict): Options.
resume_iter (int): Resume iteration.
"""
logger = get_root_logger()
if opt['path']['resume_state']:
# get all the networks
networks = [key for key in opt.keys() if key.startswith('network_')]
flag_pretrain = False
for network in networks:
if opt['path'].get(f'pretrain_{network}') is not None:
flag_pretrain = True
if flag_pretrain:
logger.warning(
'pretrain_network path will be ignored during resuming.')
# set pretrained model paths
for network in networks:
name = f'pretrain_{network}'
basename = network.replace('network_', '')
if opt['path'].get('ignore_resume_networks') is None or (
basename not in opt['path']['ignore_resume_networks']):
opt['path'][name] = osp.join(
opt['path']['models'], f'net_{basename}_{resume_iter}.pth')
logger.info(f"Set {name} to {opt['path'][name]}") | Check resume states and pretrain_network paths. Args: opt (dict): Options. resume_iter (int): Resume iteration. |
3,928 | import cv2
import numpy as np
import os
def dequantize_flow(dx, dy, max_val=0.02, denorm=True):
"""Recover from quantized flow.
Args:
dx (ndarray): Quantized dx.
dy (ndarray): Quantized dy.
max_val (float): Maximum value used when quantizing.
denorm (bool): Whether to multiply flow values with width/height.
Returns:
ndarray: Dequantized flow.
"""
assert dx.shape == dy.shape
assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1)
dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]]
if denorm:
dx *= dx.shape[1]
        dy *= dy.shape[0]
flow = np.dstack((dx, dy))
return flow
The provided code snippet includes necessary dependencies for implementing the `flowread` function. Write a Python function `def flowread(flow_path, quantize=False, concat_axis=0, *args, **kwargs)` to solve the following problem:
Read an optical flow map. Args: flow_path (ndarray or str): Flow path. quantize (bool): whether to read quantized pair, if set to True, remaining args will be passed to :func:`dequantize_flow`. concat_axis (int): The axis that dx and dy are concatenated, can be either 0 or 1. Ignored if quantize is False. Returns: ndarray: Optical flow represented as a (h, w, 2) numpy array
Here is the function:
def flowread(flow_path, quantize=False, concat_axis=0, *args, **kwargs):
"""Read an optical flow map.
Args:
flow_path (ndarray or str): Flow path.
quantize (bool): whether to read quantized pair, if set to True,
remaining args will be passed to :func:`dequantize_flow`.
concat_axis (int): The axis that dx and dy are concatenated,
can be either 0 or 1. Ignored if quantize is False.
Returns:
ndarray: Optical flow represented as a (h, w, 2) numpy array
"""
if quantize:
assert concat_axis in [0, 1]
cat_flow = cv2.imread(flow_path, cv2.IMREAD_UNCHANGED)
if cat_flow.ndim != 2:
raise IOError(f'{flow_path} is not a valid quantized flow file, '
f'its dimension is {cat_flow.ndim}.')
assert cat_flow.shape[concat_axis] % 2 == 0
dx, dy = np.split(cat_flow, 2, axis=concat_axis)
flow = dequantize_flow(dx, dy, *args, **kwargs)
else:
with open(flow_path, 'rb') as f:
try:
header = f.read(4).decode('utf-8')
except Exception:
raise IOError(f'Invalid flow file: {flow_path}')
else:
if header != 'PIEH':
raise IOError(f'Invalid flow file: {flow_path}, '
'header does not contain PIEH')
w = np.fromfile(f, np.int32, 1).squeeze()
h = np.fromfile(f, np.int32, 1).squeeze()
flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2))
return flow.astype(np.float32) | Read an optical flow map. Args: flow_path (ndarray or str): Flow path. quantize (bool): whether to read quantized pair, if set to True, remaining args will be passed to :func:`dequantize_flow`. concat_axis (int): The axis that dx and dy are concatenated, can be either 0 or 1. Ignored if quantize is False. Returns: ndarray: Optical flow represented as a (h, w, 2) numpy array |
3,929 | import cv2
import numpy as np
import os
def quantize_flow(flow, max_val=0.02, norm=True):
"""Quantize flow to [0, 255].
After this step, the size of flow will be much smaller, and can be
dumped as jpeg images.
Args:
flow (ndarray): (h, w, 2) array of optical flow.
max_val (float): Maximum value of flow, values beyond
[-max_val, max_val] will be truncated.
norm (bool): Whether to divide flow values by image width/height.
Returns:
tuple[ndarray]: Quantized dx and dy.
"""
h, w, _ = flow.shape
dx = flow[..., 0]
dy = flow[..., 1]
if norm:
dx = dx / w # avoid inplace operations
dy = dy / h
# use 255 levels instead of 256 to make sure 0 is 0 after dequantization.
flow_comps = [
quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy]
]
return tuple(flow_comps)
The provided code snippet includes necessary dependencies for implementing the `flowwrite` function. Write a Python function `def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs)` to solve the following problem:
Write optical flow to file. If the flow is not quantized, it will be saved as a .flo file losslessly, otherwise a jpeg image which is lossy but of much smaller size. (dx and dy will be concatenated horizontally into a single image if quantize is True.) Args: flow (ndarray): (h, w, 2) array of optical flow. filename (str): Output filepath. quantize (bool): Whether to quantize the flow and save it to 2 jpeg images. If set to True, remaining args will be passed to :func:`quantize_flow`. concat_axis (int): The axis that dx and dy are concatenated, can be either 0 or 1. Ignored if quantize is False.
Here is the function:
def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs):
"""Write optical flow to file.
If the flow is not quantized, it will be saved as a .flo file losslessly,
otherwise a jpeg image which is lossy but of much smaller size. (dx and dy
will be concatenated horizontally into a single image if quantize is True.)
Args:
flow (ndarray): (h, w, 2) array of optical flow.
filename (str): Output filepath.
quantize (bool): Whether to quantize the flow and save it to 2 jpeg
images. If set to True, remaining args will be passed to
:func:`quantize_flow`.
concat_axis (int): The axis that dx and dy are concatenated,
can be either 0 or 1. Ignored if quantize is False.
"""
if not quantize:
with open(filename, 'wb') as f:
f.write('PIEH'.encode('utf-8'))
np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
flow = flow.astype(np.float32)
flow.tofile(f)
f.flush()
else:
assert concat_axis in [0, 1]
dx, dy = quantize_flow(flow, *args, **kwargs)
dxdy = np.concatenate((dx, dy), axis=concat_axis)
        os.makedirs(os.path.dirname(os.path.abspath(filename)), exist_ok=True)
        cv2.imwrite(filename, dxdy)
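A lossless round trip through the non-quantized .flo path of flowwrite/flowread (synthetic flow, placeholder filename):
import numpy as np

flow = np.random.rand(48, 64, 2).astype(np.float32)
flowwrite(flow, 'example.flo')   # writes the 'PIEH' header, then w, h, data
flow_back = flowread('example.flo')
print(np.allclose(flow, flow_back))  # True: the .flo format is lossless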
3,931 | import datetime
import logging
import time
from .dist_util import get_dist_info, master_only
The provided code snippet includes necessary dependencies for implementing the `init_wandb_logger` function. Write a Python function `def init_wandb_logger(opt)` to solve the following problem:
We now only use wandb to sync the tensorboard log.
Here is the function:
def init_wandb_logger(opt):
"""We now only use wandb to sync tensorboard log."""
import wandb
logger = logging.getLogger('basicsr')
project = opt['logger']['wandb']['project']
resume_id = opt['logger']['wandb'].get('resume_id')
if resume_id:
wandb_id = resume_id
resume = 'allow'
logger.warning(f'Resume wandb logger with id={wandb_id}.')
else:
wandb_id = wandb.util.generate_id()
resume = 'never'
wandb.init(
id=wandb_id,
resume=resume,
name=opt['name'],
config=opt,
project=project,
sync_tensorboard=True)
logger.info(f'Use wandb logger with id={wandb_id}; project={project}.') | We now only use wandb to sync tensorboard log. |
3,932 | import datetime
import logging
import time
from .dist_util import get_dist_info, master_only
__version__ = '1.2.0+386ca20'
The provided code snippet includes necessary dependencies for implementing the `get_env_info` function. Write a Python function `def get_env_info()` to solve the following problem:
Get environment information. Currently, it only logs the software version.
Here is the function:
def get_env_info():
"""Get environment information.
    Currently, it only logs the software version.
"""
import torch
import torchvision
from basicsr.version import __version__
msg = r"""
____ _ _____ ____
/ __ ) ____ _ _____ (_)_____/ ___/ / __ \
/ __ |/ __ `// ___// // ___/\__ \ / /_/ /
/ /_/ // /_/ /(__ )/ // /__ ___/ // _, _/
/_____/ \__,_//____//_/ \___//____//_/ |_|
______ __ __ __ __
/ ____/____ ____ ____/ / / / __ __ _____ / /__ / /
/ / __ / __ \ / __ \ / __ / / / / / / // ___// //_/ / /
/ /_/ // /_/ // /_/ // /_/ / / /___/ /_/ // /__ / /< /_/
\____/ \____/ \____/ \____/ /_____/\____/ \___//_/|_| (_)
"""
msg += ('\nVersion Information: '
f'\n\tBasicSR: {__version__}'
f'\n\tPyTorch: {torch.__version__}'
f'\n\tTorchVision: {torchvision.__version__}')
return msg | Get environment information. Currently, only log the software version. |
3,933 | import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import yaml
from os import path as osp
from basicsr.models import create_model
from basicsr.utils import FileClient, imfrombytes, img2tensor, padding, tensor2img, imwrite, set_random_seed
import argparse
from basicsr.utils.options import dict2str, parse
from basicsr.utils.dist_util import get_dist_info, init_dist
import random
def parse(opt_path, is_train=True):
"""Parse option file.
Args:
opt_path (str): Option file path.
is_train (str): Indicate whether in training or not. Default: True.
Returns:
(dict): Options.
"""
with open(opt_path, mode='r') as f:
Loader, _ = ordered_yaml()
opt = yaml.load(f, Loader=Loader)
opt['is_train'] = is_train
# datasets
if 'datasets' in opt:
for phase, dataset in opt['datasets'].items():
# for several datasets, e.g., test_1, test_2
phase = phase.split('_')[0]
dataset['phase'] = phase
if 'scale' in opt:
dataset['scale'] = opt['scale']
if dataset.get('dataroot_gt') is not None:
dataset['dataroot_gt'] = osp.expanduser(dataset['dataroot_gt'])
if dataset.get('dataroot_lq') is not None:
dataset['dataroot_lq'] = osp.expanduser(dataset['dataroot_lq'])
# paths
for key, val in opt['path'].items():
if (val is not None) and ('resume_state' in key
or 'pretrain_network' in key):
opt['path'][key] = osp.expanduser(val)
opt['path']['root'] = osp.abspath(
osp.join(__file__, osp.pardir, osp.pardir, osp.pardir))
if is_train:
experiments_root = osp.join(opt['path']['root'], 'experiments',
opt['name'])
opt['path']['experiments_root'] = experiments_root
opt['path']['models'] = osp.join(experiments_root, 'models')
opt['path']['training_states'] = osp.join(experiments_root,
'training_states')
opt['path']['log'] = experiments_root
opt['path']['visualization'] = osp.join(experiments_root,
'visualization')
# change some options for debug mode
if 'debug' in opt['name']:
if 'val' in opt:
opt['val']['val_freq'] = 8
opt['logger']['print_freq'] = 1
opt['logger']['save_checkpoint_freq'] = 8
else: # test
results_root = osp.join(opt['path']['root'], 'results', opt['name'])
opt['path']['results_root'] = results_root
opt['path']['log'] = results_root
opt['path']['visualization'] = osp.join(results_root, 'visualization')
return opt
def init_dist(launcher, backend='nccl', **kwargs):
if mp.get_start_method(allow_none=True) is None:
mp.set_start_method('spawn')
if launcher == 'pytorch':
_init_dist_pytorch(backend, **kwargs)
elif launcher == 'slurm':
_init_dist_slurm(backend, **kwargs)
else:
raise ValueError(f'Invalid launcher type: {launcher}')
def get_dist_info():
if dist.is_available():
initialized = dist.is_initialized()
else:
initialized = False
if initialized:
rank = dist.get_rank()
world_size = dist.get_world_size()
else:
rank = 0
world_size = 1
return rank, world_size
def parse_options(is_train=True):
parser = argparse.ArgumentParser()
parser.add_argument(
'-opt', type=str, required=True, help='Path to option YAML file.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--input_l_path', type=str, required=True, help='The path to the input left image. For stereo image inference only.')
parser.add_argument('--input_r_path', type=str, required=True, help='The path to the input right image. For stereo image inference only.')
parser.add_argument('--output_l_path', type=str, required=True, help='The path to the output left image. For stereo image inference only.')
parser.add_argument('--output_r_path', type=str, required=True, help='The path to the output right image. For stereo image inference only.')
args = parser.parse_args()
opt = parse(args.opt, is_train=is_train)
# distributed settings
if args.launcher == 'none':
opt['dist'] = False
print('Disable distributed.', flush=True)
else:
opt['dist'] = True
if args.launcher == 'slurm' and 'dist_params' in opt:
init_dist(args.launcher, **opt['dist_params'])
else:
init_dist(args.launcher)
print('init dist .. ', args.launcher)
opt['rank'], opt['world_size'] = get_dist_info()
# random seed
seed = opt.get('manual_seed')
if seed is None:
seed = random.randint(1, 10000)
opt['manual_seed'] = seed
set_random_seed(seed + opt['rank'])
opt['img_path'] = {
'input_l': args.input_l_path,
'input_r': args.input_r_path,
'output_l': args.output_l_path,
'output_r': args.output_r_path
}
return opt | null |
3,934 | import torch
from basicsr.models import create_model
from basicsr.utils import FileClient, imfrombytes, img2tensor, padding, tensor2img, imwrite, set_random_seed
import argparse
from basicsr.utils.options import dict2str, parse
from basicsr.utils.dist_util import get_dist_info, init_dist
import random
def imread(img_path):
file_client = FileClient('disk')
img_bytes = file_client.get(img_path, None)
try:
img = imfrombytes(img_bytes, float32=True)
    except Exception as exc:
        raise IOError("path {} not working".format(img_path)) from exc
img = img2tensor(img, bgr2rgb=True, float32=True)
return img | null |
3,935 | import torch
import numpy as np
import cv2
import tempfile
import matplotlib.pyplot as plt
from cog import BasePredictor, Path, Input, BaseModel
from basicsr.models import create_model
from basicsr.utils import img2tensor as _img2tensor, tensor2img, imwrite
from basicsr.utils.options import parse
def imread(img_path):
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img | null |
3,936 | import torch
import numpy as np
import cv2
import tempfile
import matplotlib.pyplot as plt
from cog import BasePredictor, Path, Input, BaseModel
from basicsr.models import create_model
from basicsr.utils import img2tensor as _img2tensor, tensor2img, imwrite
from basicsr.utils.options import parse
def img2tensor(img, bgr2rgb=False, float32=True):
img = img.astype(np.float32) / 255.0
return _img2tensor(img, bgr2rgb=bgr2rgb, float32=float32) | null |
3,937 | import torch
import numpy as np
import cv2
import tempfile
import matplotlib.pyplot as plt
from cog import BasePredictor, Path, Input, BaseModel
from basicsr.models import create_model
from basicsr.utils import img2tensor as _img2tensor, tensor2img, imwrite
from basicsr.utils.options import parse
def single_image_inference(model, img, save_path):
model.feed_data(data={"lq": img.unsqueeze(dim=0)})
if model.opt["val"].get("grids", False):
model.grids()
model.test()
if model.opt["val"].get("grids", False):
model.grids_inverse()
visuals = model.get_current_visuals()
sr_img = tensor2img([visuals["result"]])
imwrite(sr_img, save_path) | null |
3,938 | import torch
import numpy as np
import cv2
import tempfile
import matplotlib.pyplot as plt
from cog import BasePredictor, Path, Input, BaseModel
from basicsr.models import create_model
from basicsr.utils import img2tensor as _img2tensor, tensor2img, imwrite
from basicsr.utils.options import parse
def stereo_image_inference(model, img_l, img_r, out_path):
img = torch.cat([img_l, img_r], dim=0)
model.feed_data(data={"lq": img.unsqueeze(dim=0)})
if model.opt["val"].get("grids", False):
model.grids()
model.test()
if model.opt["val"].get("grids", False):
model.grids_inverse()
visuals = model.get_current_visuals()
img_L = visuals["result"][:, :3]
img_R = visuals["result"][:, 3:]
img_L, img_R = tensor2img([img_L, img_R], rgb2bgr=False)
# save_stereo_image
h, w = img_L.shape[:2]
fig = plt.figure(figsize=(w // 40, h // 40))
ax1 = fig.add_subplot(2, 1, 1)
plt.title("NAFSSR output (Left)", fontsize=14)
ax1.axis("off")
ax1.imshow(img_L)
ax2 = fig.add_subplot(2, 1, 2)
plt.title("NAFSSR output (Right)", fontsize=14)
ax2.axis("off")
ax2.imshow(img_R)
plt.subplots_adjust(hspace=0.08)
plt.savefig(str(out_path), bbox_inches="tight", dpi=600) | null |
3,939 | import cv2
import numpy as np
import os
import sys
from multiprocessing import Pool
from os import path as osp
from tqdm import tqdm
from basicsr.utils import scandir_SIDD
from basicsr.utils.create_lmdb import create_lmdb_for_SIDD
def worker(path, opt):
"""Worker for each process.
Args:
path (str): Image path.
opt (dict): Configuration dict. It contains:
crop_size (int): Crop size.
step (int): Step for overlapped sliding window.
thresh_size (int): Threshold size. Patches whose size is lower
than thresh_size will be dropped.
save_folder (str): Path to save folder.
compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION.
Returns:
process_info (str): Process information displayed in progress bar.
"""
crop_size = opt['crop_size']
step = opt['step']
thresh_size = opt['thresh_size']
img_name, extension = osp.splitext(osp.basename(path))
img_name = img_name.replace(opt['keywords'], '')
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if img.ndim == 2:
h, w = img.shape
elif img.ndim == 3:
h, w, c = img.shape
else:
raise ValueError(f'Image ndim should be 2 or 3, but got {img.ndim}')
h_space = np.arange(0, h - crop_size + 1, step)
if h - (h_space[-1] + crop_size) > thresh_size:
h_space = np.append(h_space, h - crop_size)
w_space = np.arange(0, w - crop_size + 1, step)
if w - (w_space[-1] + crop_size) > thresh_size:
w_space = np.append(w_space, w - crop_size)
index = 0
for x in h_space:
for y in w_space:
index += 1
cropped_img = img[x:x + crop_size, y:y + crop_size, ...]
cropped_img = np.ascontiguousarray(cropped_img)
cv2.imwrite(
osp.join(opt['save_folder'],
f'{img_name}_s{index:03d}{extension}'), cropped_img,
[cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']])
process_info = f'Processing {img_name} ...'
return process_info
The provided code snippet includes necessary dependencies for implementing the `extract_subimages` function. Write a Python function `def extract_subimages(opt)` to solve the following problem:
Crop images to subimages. Args: opt (dict): Configuration dict. It contains: input_folder (str): Path to the input folder. save_folder (str): Path to save folder. n_thread (int): Thread number.
Here is the function:
def extract_subimages(opt):
"""Crop images to subimages.
Args:
opt (dict): Configuration dict. It contains:
input_folder (str): Path to the input folder.
save_folder (str): Path to save folder.
n_thread (int): Thread number.
"""
input_folder = opt['input_folder']
save_folder = opt['save_folder']
if not osp.exists(save_folder):
os.makedirs(save_folder)
print(f'mkdir {save_folder} ...')
else:
        print(f'Folder {save_folder} already exists. Continue anyway.')
# sys.exit(1)
img_list = list(scandir_SIDD(input_folder, keywords=opt['keywords'], recursive=True, full_path=True))
pbar = tqdm(total=len(img_list), unit='image', desc='Extract')
pool = Pool(opt['n_thread'])
for path in img_list:
pool.apply_async(
worker, args=(path, opt), callback=lambda arg: pbar.update(1))
pool.close()
pool.join()
pbar.close()
print('All processes done.') | Crop images to subimages. Args: opt (dict): Configuration dict. It contains: input_folder (str): Path to the input folder. save_folder (str): Path to save folder. n_thread (int): Thread number. |
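An illustrative configuration for extract_subimages; every path and size below is an assumption, not a prescribed setting:
opt = {
    'input_folder': './datasets/SIDD/Data',
    'save_folder': './datasets/SIDD/train/input_crops',
    'keywords': '_NOISY_SRGB',   # hypothetical filename keyword
    'crop_size': 512,
    'step': 384,
    'thresh_size': 0,
    'compression_level': 3,
    'n_thread': 10,
}
extract_subimages(opt)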
3,940 | import cv2
import numpy as np
import os
import sys
from multiprocessing import Pool
from os import path as osp
from tqdm import tqdm
from basicsr.utils import scandir
from basicsr.utils.create_lmdb import create_lmdb_for_gopro
def worker(path, opt):
"""Worker for each process.
Args:
path (str): Image path.
opt (dict): Configuration dict. It contains:
crop_size (int): Crop size.
step (int): Step for overlapped sliding window.
thresh_size (int): Threshold size. Patches whose size is lower
than thresh_size will be dropped.
save_folder (str): Path to save folder.
compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION.
Returns:
process_info (str): Process information displayed in progress bar.
"""
crop_size = opt['crop_size']
step = opt['step']
thresh_size = opt['thresh_size']
img_name, extension = osp.splitext(osp.basename(path))
# remove the x2, x3, x4 and x8 in the filename for DIV2K
    img_name = img_name.replace('x2', '').replace('x3', '').replace('x4', '').replace('x8', '')
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
if img.ndim == 2:
h, w = img.shape
elif img.ndim == 3:
h, w, c = img.shape
else:
raise ValueError(f'Image ndim should be 2 or 3, but got {img.ndim}')
h_space = np.arange(0, h - crop_size + 1, step)
if h - (h_space[-1] + crop_size) > thresh_size:
h_space = np.append(h_space, h - crop_size)
w_space = np.arange(0, w - crop_size + 1, step)
if w - (w_space[-1] + crop_size) > thresh_size:
w_space = np.append(w_space, w - crop_size)
index = 0
for x in h_space:
for y in w_space:
index += 1
cropped_img = img[x:x + crop_size, y:y + crop_size, ...]
cropped_img = np.ascontiguousarray(cropped_img)
cv2.imwrite(
osp.join(opt['save_folder'],
f'{img_name}_s{index:03d}{extension}'), cropped_img,
[cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']])
process_info = f'Processing {img_name} ...'
return process_info
The provided code snippet includes necessary dependencies for implementing the `extract_subimages` function. Write a Python function `def extract_subimages(opt)` to solve the following problem:
Crop images to subimages. Args: opt (dict): Configuration dict. It contains: input_folder (str): Path to the input folder. save_folder (str): Path to save folder. n_thread (int): Thread number.
Here is the function:
def extract_subimages(opt):
"""Crop images to subimages.
Args:
opt (dict): Configuration dict. It contains:
input_folder (str): Path to the input folder.
save_folder (str): Path to save folder.
n_thread (int): Thread number.
"""
input_folder = opt['input_folder']
save_folder = opt['save_folder']
if not osp.exists(save_folder):
os.makedirs(save_folder)
print(f'mkdir {save_folder} ...')
else:
print(f'Folder {save_folder} already exists. Exit.')
sys.exit(1)
img_list = list(scandir(input_folder, full_path=True))
pbar = tqdm(total=len(img_list), unit='image', desc='Extract')
pool = Pool(opt['n_thread'])
for path in img_list:
pool.apply_async(
worker, args=(path, opt), callback=lambda arg: pbar.update(1))
pool.close()
pool.join()
pbar.close()
print('All processes done.') | Crop images to subimages. Args: opt (dict): Configuration dict. It contains: input_folder (str): Path to the input folder. save_folder (str): Path to save folder. n_thread (int): Thread number. |
3,941 | import os
import time
from basicsr.utils.create_lmdb import create_lmdb_for_reds
def make_val_300(folder, dst):
if not os.path.exists(dst):
os.mkdir(dst)
templates = '*9.*'
cp_command = 'cp {} {}'.format(os.path.join(folder, templates), dst)
os.system(cp_command) | null |
3,942 | import os
import time
from basicsr.utils.create_lmdb import create_lmdb_for_reds
def flatten_folders(folder):
for vid in range(300):
vidfolder_path = '{:03}'.format(vid)
if not os.path.exists(os.path.join(folder, vidfolder_path)):
continue
print('working on .. {} .. {}'.format(folder, vid))
for fid in range(100):
src_filename = '{:08}'.format(fid)
suffixes = ['.jpg', '.png']
suffix = None
for suf in suffixes:
# print(os.path.join(folder, vidfolder_path, src_filename+suf))
if os.path.exists(os.path.join(folder, vidfolder_path, src_filename+suf)):
suffix = suf
break
assert suffix is not None
src_filepath = os.path.join(folder, vidfolder_path, src_filename+suffix)
dst_filepath = os.path.join(folder, '{}_{}{}'.format(vidfolder_path, src_filename, suffix))
os.system('mv {} {}'.format(src_filepath, dst_filepath))
time.sleep(0.001)
os.system('rm -r {}'.format(os.path.join(folder, vidfolder_path))) | null |
3,943 | from datetime import datetime
import time
import requests
import sys
import json
from azure.identity import AzureCliCredential
import logging
from azure.ai.ml import MLClient
from sseclient import SSEClient
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
def apply_delta(base: dict, delta: dict):
def score(url, api_key, body, stream=True, on_event=None):
headers = {
"Content-Type": "application/json",
"Authorization": ("Bearer " + api_key),
# The azureml-model-deployment header will force the request to go to a specific deployment.
# Remove this header to have the request observe the endpoint traffic rules
"azureml-model-deployment": "blue",
"Accept": "text/event-stream, application/json" if stream else "application/json"
}
logger.info("Sending HTTP request...")
logger.debug("POST %s", url)
for name, value in headers.items():
if name == "Authorization":
value = "[REDACTED]"
logger.debug(f">>> {name}: {value}")
logger.debug(json.dumps(body, indent=4, ensure_ascii=False))
logger.debug("")
time1 = datetime.now()
response = None
try:
response = requests.post(url, json=body, headers=headers, stream=stream)
response.raise_for_status()
finally:
time2 = datetime.now()
if response is not None:
logger.info(
"Got response: %d %s (elapsed %s)",
response.status_code,
response.reason,
time2 - time1,
)
for name, value in response.headers.items():
logger.debug(f"<<< {name}: {value}")
time1 = datetime.now()
try:
        content_type = response.headers.get('Content-Type', '')
if "text/event-stream" in content_type:
output = {}
client = SSEClient(response)
for event in client.events():
if on_event:
on_event(event)
dct = json.loads(event.data)
apply_delta(output, dct)
return output, True
else:
return response.json(), False
finally:
time2 = datetime.now()
logger.info("\nResponse reading elapsed: %s", time2 - time1) | null |
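A hedged usage sketch for score; the endpoint URL and api key are placeholders, and the body follows a chat-style schema that the target deployment is assumed to accept:
def on_event(event):
    print(event.data)  # each streamed SSE chunk as raw JSON text

body = {"messages": [{"role": "user", "content": "Hello"}], "stream": True}
output, streamed = score(
    url="https://<endpoint>.inference.ml.azure.com/score",  # placeholder
    api_key="<api-key>",                                    # placeholder
    body=body,
    stream=True,
    on_event=on_event,
)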
3,944 | import os
import re
from io import open
from typing import Any, List, Match, cast
from setuptools import find_namespace_packages, setup
with open(os.path.join(PACKAGE_FOLDER_PATH, "version.txt"), "r") as fd:
version_content = fd.read()
print(version_content)
version = cast(Match[Any], re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', version_content, re.MULTILINE)).group(1)
with open("README.md", encoding="utf-8") as f:
readme = f.read()
with open("CHANGELOG.md", encoding="utf-8") as f:
changelog = f.read()
def parse_requirements(file_name: str) -> List[str]:
with open(file_name) as f:
return [
require.strip() for require in f
if require.strip() and not require.startswith('#')
] | null |
3,945 | from openai import OpenAIError
from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException
openai_error_code_ref_message = "Error reference: https://platform.openai.com/docs/guides/error-codes/api-errors"
def to_openai_error_message(e: Exception) -> str:
ex_type = type(e).__name__
error_message = str(e)
# https://learn.microsoft.com/en-gb/azure/ai-services/openai/reference
params_chat_model_cannot_accept = ["best_of", "echo", "logprobs"]
if error_message == "<empty message>":
msg = "The api key is invalid or revoked. " \
"You can correct or regenerate the api key of your connection."
return f"OpenAI API hits {ex_type}: {msg}"
# for models that do not support the `functions` parameter.
elif "Unrecognized request argument supplied: functions" in error_message:
msg = "Current model does not support the `functions` parameter. If you are using openai connection, then " \
"please use gpt-3.5-turbo, gpt-4, gpt-4-32k, gpt-3.5-turbo-0613 or gpt-4-0613. You can refer to " \
"https://platform.openai.com/docs/guides/gpt/function-calling. If you are using azure openai " \
"connection, then please first go to your Azure OpenAI resource, deploy model 'gpt-35-turbo' or " \
"'gpt-4' with version 0613, then go to prompt flow connection page, upgrade connection api version to " \
"'2023-07-01-preview'. You can refer to " \
"https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/function-calling."
return f"OpenAI API hits {ex_type}: {msg}"
elif "The completion operation does not work with the specified model" in error_message or \
("parameters are not available" in error_message
and any(param in error_message for param in params_chat_model_cannot_accept)):
msg = "The completion operation does not work with the current model. " \
"Completion API is a legacy api and is going to be deprecated soon. " \
"Please change to use Chat API for current model. " \
"You could refer to guideline at https://aka.ms/pfdoc/chat-prompt " \
"or view the samples in our gallery that contain 'Chat' in the name."
return f"OpenAI API hits {ex_type}: {msg}"
elif "Invalid content type. image_url is only supported by certain models" in error_message:
msg = "Current model does not support the image input. If you are using openai connection, then please use " \
"gpt-4-vision-preview. You can refer to https://platform.openai.com/docs/guides/vision." \
"If you are using azure openai connection, then please first go to your Azure OpenAI resource, " \
"create a GPT-4 Turbo with Vision deployment by selecting model name: \"gpt-4\" and "\
"model version \"vision-preview\". You can refer to " \
"https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/gpt-with-vision"
return f"OpenAI API hits {ex_type}: {msg}"
elif ("\'response_format\' of type" in error_message and "is not supported with this model." in error_message)\
or ("Additional properties are not allowed" in error_message
and "unexpected) - \'response_format\'" in error_message):
msg = "The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}. " \
"The value associated with the type key should be either 'text' or 'json_object' " \
"If you are using openai connection, you can only set response_format to { \"type\": \"json_object\" } " \
"when calling gpt-3.5-turbo-1106 or gpt-4-1106-preview to enable JSON mode. You can refer to " \
"https://platform.openai.com/docs/guides/text-generation/json-mode. If you are using azure openai " \
"connection, then please first go to your Azure OpenAI resource, deploy model 'gpt-35-turbo-1106' or " \
"'gpt-4-1106-preview'. You can refer to " \
"https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/json-mode?tabs=python."
return f"OpenAI API hits {ex_type}: {msg}"
elif "Principal does not have access to API/Operation" in error_message:
msg = "Principal does not have access to API/Operation. If you are using azure openai connection, " \
"please make sure you have proper role assignment on your azure openai resource. You can refer to " \
"https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/role-based-access-control"
return f"OpenAI API hits {ex_type}: {msg}"
else:
return f"OpenAI API hits {ex_type}: {error_message} [{openai_error_code_ref_message}]" | null |
3,946 | from enum import Enum
from typing import Union
from promptflow.tools.common import handle_openai_error, init_openai_client, init_azure_openai_client, \
    normalize_connection_config
from promptflow.tools.exception import InvalidConnectionType
from promptflow._internal import tool
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
class EmbeddingModel(str, Enum):
TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002"
TEXT_SEARCH_ADA_DOC_001 = "text-search-ada-doc-001"
TEXT_SEARCH_ADA_QUERY_001 = "text-search-ada-query-001"
def init_openai_client(connection: OpenAIConnection):
try:
from openai import OpenAI as OpenAIClient
except ImportError as e:
if "cannot import name 'OpenAI' from 'openai'" in str(e):
raise ImportError(
"Please upgrade your OpenAI package to version 1.0.0 or later" +
"using the command: pip install --upgrade openai.")
else:
raise e
conn_dict = normalize_connection_config(connection)
return OpenAIClient(**conn_dict)
def init_azure_openai_client(connection: AzureOpenAIConnection):
try:
from openai import AzureOpenAI as AzureOpenAIClient
except ImportError as e:
if "cannot import name 'AzureOpenAI' from 'openai'" in str(e):
raise ImportError(
"Please upgrade your OpenAI package to version 1.0.0 or later" +
"using the command: pip install --upgrade openai.")
else:
raise e
conn_dict = normalize_connection_config(connection)
return AzureOpenAIClient(**conn_dict)
class InvalidConnectionType(ToolValidationError):
"""Base exception raised when failed to pass invalid connection type."""
pass
def embedding(connection: Union[AzureOpenAIConnection, OpenAIConnection], input: str, deployment_name: str = "",
model: EmbeddingModel = EmbeddingModel.TEXT_EMBEDDING_ADA_002):
if isinstance(connection, AzureOpenAIConnection):
client = init_azure_openai_client(connection)
return client.embeddings.create(
input=input,
model=deployment_name,
extra_headers={"ms-azure-ai-promptflow-called-from": "aoai-tool"}
).data[0].embedding
elif isinstance(connection, OpenAIConnection):
client = init_openai_client(connection)
return client.embeddings.create(
input=input,
model=model
).data[0].embedding
else:
error_message = f"Not Support connection type '{type(connection).__name__}' for embedding api. " \
f"Connection type should be in [AzureOpenAIConnection, OpenAIConnection]."
raise InvalidConnectionType(message=error_message) | null |
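A minimal usage sketch of `embedding`, assuming a valid connection; the API key is a placeholder.
conn = OpenAIConnection(api_key="sk-...")  # placeholder key
vector = embedding(conn, input="hello world", model=EmbeddingModel.TEXT_EMBEDDING_ADA_002)
print(len(vector))  # ada-002 returns 1536-dimensional vectors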
3,947 | from enum import Enum
from typing import Dict, List, Union
import json
import requests
from promptflow import tool, ToolProvider
from promptflow.connections import AzureContentSafetyConnection
from promptflow.tools.exception import AzureContentSafetyInputValueError, AzureContentSafetySystemError
class TextCategorySensitivity(str, Enum):
DISABLE = "disable"
LOW_SENSITIVITY = "low_sensitivity"
MEDIUM_SENSITIVITY = "medium_sensitivity"
HIGH_SENSITIVITY = "high_sensitivity"
class AzureContentSafety(ToolProvider):
"""
Doc reference :
https://review.learn.microsoft.com/en-us/azure/cognitive-services/content-safety/quickstart?branch=pr-en-us-233724&pivots=programming-language-rest
"""
def __init__(self, connection: AzureContentSafetyConnection):
self.connection = connection
super(AzureContentSafety, self).__init__()
def analyze_text(
self,
text: str,
hate_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY,
sexual_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY,
self_harm_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY,
violence_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY,
):
content_safety = ContentSafety(self.connection.endpoint, self.connection.api_key, self.connection.api_version)
media_type = MediaType.Text
blocklists = []
detection_result = content_safety.detect(media_type, text, blocklists)
# Set the reject thresholds for each category
reject_thresholds = {
Category.Hate: switch_category_threshold(hate_category),
Category.SelfHarm: switch_category_threshold(self_harm_category),
Category.Sexual: switch_category_threshold(sexual_category),
Category.Violence: switch_category_threshold(violence_category),
}
# Make a decision based on the detection result and reject thresholds
if self.connection.api_version == "2023-10-01":
decision_result = content_safety.make_decision_1001(detection_result, reject_thresholds)
else:
decision_result = content_safety.make_decision(detection_result, reject_thresholds)
return convert_decision_to_json(decision_result)
def analyze_text(
connection: AzureContentSafetyConnection,
text: str,
hate_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY,
sexual_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY,
self_harm_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY,
violence_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY,
):
return AzureContentSafety(connection).analyze_text(
text=text,
hate_category=hate_category,
sexual_category=sexual_category,
self_harm_category=self_harm_category,
violence_category=violence_category,
) | null |
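A minimal usage sketch of the module-level `analyze_text` wrapper; the endpoint, key, and input text are placeholders.
conn = AzureContentSafetyConnection(
    endpoint="https://<resource>.cognitiveservices.azure.com",  # placeholder
    api_key="<key>",                                            # placeholder
    api_version="2023-10-01",
)
result = analyze_text(conn, text="some user input",
                      violence_category=TextCategorySensitivity.HIGH_SENSITIVITY)
print(result["suggested_action"])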
3,948 | from enum import Enum
from typing import Dict, List, Union
import json
import requests
from promptflow import tool, ToolProvider
from promptflow.connections import AzureContentSafetyConnection
from promptflow.tools.exception import AzureContentSafetyInputValueError, AzureContentSafetySystemError
class TextCategorySensitivity(str, Enum):
def switch_category_threshold(sensitivity: TextCategorySensitivity) -> int:
switcher = {
TextCategorySensitivity.DISABLE: -1,
TextCategorySensitivity.LOW_SENSITIVITY: 6,
TextCategorySensitivity.MEDIUM_SENSITIVITY: 4,
TextCategorySensitivity.HIGH_SENSITIVITY: 2,
}
    if sensitivity not in switcher:
        # Raising keeps the declared -> int return type honest for unsupported values.
        raise AzureContentSafetyInputValueError(message=f"Non-supported sensitivity: {sensitivity}")
    return switcher[sensitivity]
3,949 | from enum import Enum
from typing import Dict, List, Union
import json
import requests
from promptflow import tool, ToolProvider
from promptflow.connections import AzureContentSafetyConnection
from promptflow.tools.exception import AzureContentSafetyInputValueError, AzureContentSafetySystemError
class Decision(object):
def __init__(self, suggested_action: Action, action_by_category: Dict[Category, Action]) -> None:
"""
Represents the decision made by the content moderation system.
Args:
- suggested_action (Action): The suggested action to take.
- action_by_category (dict[Category, Action]): The action to take for each category.
"""
self.suggested_action = suggested_action
self.action_by_category = action_by_category
def convert_decision_to_json(decision: Decision):
result_json = {}
result_json["suggested_action"] = decision.suggested_action.value
category_json = {}
for key, value in decision.action_by_category.items():
category_json[key.name] = value.value
result_json["action_by_category"] = category_json
return result_json | null |
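For reference, a hedged sketch of the shape `convert_decision_to_json` returns, assuming `Action` and `Category` are the tool's enums:
# Example return value of convert_decision_to_json(decision):
# {
#     "suggested_action": "Reject",
#     "action_by_category": {"Hate": "Accept", "SelfHarm": "Accept",
#                            "Sexual": "Accept", "Violence": "Reject"}
# }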
3,950 | from enum import Enum
from promptflow.tools.common import render_jinja_template, handle_openai_error, \
parse_chat, to_bool, validate_functions, process_function_call, \
post_process_chat_api_response, init_openai_client
from promptflow._internal import ToolProvider, tool, register_apis
from promptflow.connections import OpenAIConnection
from promptflow.contracts.types import PromptTemplate
class Engine(str, Enum):
class OpenAI(ToolProvider):
def __init__(self, connection: OpenAIConnection):
def completion(
self,
prompt: PromptTemplate,
model: Engine = Engine.TEXT_DAVINCI_003,
suffix: str = None,
max_tokens: int = 16,
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
logprobs: int = None,
echo: bool = False,
stop: list = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
best_of: int = 1,
logit_bias: dict = {},
user: str = "",
**kwargs,
) -> str:
def generator():
def chat(
self,
prompt: PromptTemplate,
model: str = "gpt-3.5-turbo",
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
stop: list = None,
max_tokens: int = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
logit_bias: dict = {},
user: str = "",
# function_call can be of type str or dict.
function_call: object = None,
functions: list = None,
response_format: object = None,
seed: int = None,
**kwargs
) -> [str, dict]:
def completion(
connection: OpenAIConnection,
prompt: PromptTemplate,
model: Engine = Engine.TEXT_DAVINCI_003,
suffix: str = None,
max_tokens: int = 16,
temperature: float = 1.0,
top_p: float = 1,
n: int = 1,
stream: bool = False,
logprobs: int = None,
echo: bool = False,
stop: list = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
best_of: int = 1,
logit_bias: dict = {},
user: str = "",
**kwargs
) -> [str, dict]:
return OpenAI(connection).completion(
prompt=prompt,
model=model,
suffix=suffix,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
n=n,
stream=stream,
logprobs=logprobs,
echo=echo,
stop=stop if stop else None,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
best_of=best_of,
logit_bias=logit_bias,
user=user,
**kwargs,
) | null |
3,951 | from enum import Enum
from promptflow.tools.common import render_jinja_template, handle_openai_error, \
parse_chat, to_bool, validate_functions, process_function_call, \
post_process_chat_api_response, init_openai_client
from promptflow._internal import ToolProvider, tool, register_apis
from promptflow.connections import OpenAIConnection
from promptflow.contracts.types import PromptTemplate
class OpenAI(ToolProvider):
def __init__(self, connection: OpenAIConnection):
super().__init__()
self._client = init_openai_client(connection)
def completion(
self,
prompt: PromptTemplate,
model: Engine = Engine.TEXT_DAVINCI_003,
suffix: str = None,
max_tokens: int = 16,
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
logprobs: int = None,
echo: bool = False,
stop: list = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
best_of: int = 1,
logit_bias: dict = {},
user: str = "",
**kwargs,
) -> str:
prompt = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
# TODO: remove below type conversion after client can pass json rather than string.
echo = to_bool(echo)
stream = to_bool(stream)
response = self._client.completions.create(
prompt=prompt,
model=model.value if isinstance(model, Enum) else model,
# empty string suffix should be treated as None.
suffix=suffix if suffix else None,
max_tokens=int(max_tokens),
temperature=float(temperature),
top_p=float(top_p),
n=int(n),
stream=stream,
logprobs=int(logprobs) if logprobs else None,
echo=echo,
stop=stop if stop else None,
presence_penalty=float(presence_penalty),
frequency_penalty=float(frequency_penalty),
best_of=int(best_of),
# Logit bias must be a dict if we passed it to openai api.
logit_bias=logit_bias if logit_bias else {},
user=user
)
if stream:
def generator():
for chunk in response:
if chunk.choices:
yield getattr(chunk.choices[0], "text", "")
            # We must return the generator object rather than yielding directly here.
            # Otherwise, the function itself would become a generator regardless of whether stream is True or False.
return generator()
else:
# get first element because prompt is single.
return response.choices[0].text
def chat(
self,
prompt: PromptTemplate,
model: str = "gpt-3.5-turbo",
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
stop: list = None,
max_tokens: int = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
logit_bias: dict = {},
user: str = "",
# function_call can be of type str or dict.
function_call: object = None,
functions: list = None,
response_format: object = None,
seed: int = None,
**kwargs
) -> [str, dict]:
chat_str = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
messages = parse_chat(chat_str)
# TODO: remove below type conversion after client can pass json rather than string.
stream = to_bool(stream)
params = {
"model": model,
"messages": messages,
"temperature": float(temperature),
"top_p": float(top_p),
"n": int(n),
"stream": stream,
"max_tokens": int(max_tokens) if max_tokens is not None and str(max_tokens).lower() != "inf" else None,
"presence_penalty": float(presence_penalty),
"frequency_penalty": float(frequency_penalty),
"user": user,
}
if functions is not None:
validate_functions(functions)
params["functions"] = functions
params["function_call"] = process_function_call(function_call)
# to avoid vision model validation error for empty param values.
if stop:
params["stop"] = stop
if max_tokens is not None and str(max_tokens).lower() != "inf":
params["max_tokens"] = int(max_tokens)
if logit_bias:
params["logit_bias"] = logit_bias
if response_format:
params["response_format"] = response_format
if seed is not None:
params["seed"] = seed
completion = self._client.chat.completions.create(**params)
return post_process_chat_api_response(completion, stream, functions)
def chat(
connection: OpenAIConnection,
prompt: PromptTemplate,
model: str = "gpt-3.5-turbo",
temperature: float = 1,
top_p: float = 1,
n: int = 1,
stream: bool = False,
stop: list = None,
max_tokens: int = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
logit_bias: dict = {},
user: str = "",
function_call: object = None,
functions: list = None,
response_format: object = None,
seed: int = None,
**kwargs
) -> [str, dict]:
return OpenAI(connection).chat(
prompt=prompt,
model=model,
temperature=temperature,
top_p=top_p,
n=n,
stream=stream,
stop=stop if stop else None,
max_tokens=max_tokens,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
user=user,
function_call=function_call,
functions=functions,
response_format=response_format,
seed=seed,
**kwargs,
) | null |
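A minimal usage sketch of the module-level `chat` wrapper; the API key, template, and question are illustrative.
conn = OpenAIConnection(api_key="sk-...")  # placeholder key
template = PromptTemplate("system:\nYou are terse.\nuser:\n{{question}}")
answer = chat(conn, prompt=template, model="gpt-3.5-turbo", question="What is 2+2?")
print(answer)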
3,952 | import json
from promptflow.tools.common import render_jinja_template, handle_openai_error, parse_chat, to_bool, \
validate_functions, process_function_call, post_process_chat_api_response, init_azure_openai_client
from promptflow._internal import enable_cache, ToolProvider, tool, register_apis
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.types import PromptTemplate
class AzureOpenAI(ToolProvider):
def __init__(self, connection: AzureOpenAIConnection):
super().__init__()
self.connection = connection
self._client = init_azure_openai_client(connection)
def calculate_cache_string_for_completion(
self,
**kwargs,
) -> str:
d = dict(self.connection)
d.pop("api_key")
d.update({**kwargs})
return json.dumps(d)
def completion(
self,
prompt: PromptTemplate,
# for AOAI, deployment name is customized by user, not model name.
deployment_name: str,
suffix: str = None,
max_tokens: int = 16,
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
logprobs: int = None,
echo: bool = False,
stop: list = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
best_of: int = 1,
logit_bias: dict = {},
user: str = "",
**kwargs,
) -> str:
prompt = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
# TODO: remove below type conversion after client can pass json rather than string.
echo = to_bool(echo)
stream = to_bool(stream)
response = self._client.completions.create(
prompt=prompt,
model=deployment_name,
# empty string suffix should be treated as None.
suffix=suffix if suffix else None,
max_tokens=int(max_tokens),
temperature=float(temperature),
top_p=float(top_p),
n=int(n),
stream=stream,
# TODO: remove below type conversion after client pass json rather than string.
# empty string will go to else branch, but original api cannot accept empty
# string, must be None.
logprobs=int(logprobs) if logprobs else None,
echo=echo,
# fix bug "[] is not valid under any of the given schemas-'stop'"
stop=stop if stop else None,
presence_penalty=float(presence_penalty),
frequency_penalty=float(frequency_penalty),
best_of=int(best_of),
# Logit bias must be a dict if we passed it to openai api.
logit_bias=logit_bias if logit_bias else {},
user=user,
extra_headers={"ms-azure-ai-promptflow-called-from": "aoai-tool"})
if stream:
def generator():
for chunk in response:
if chunk.choices:
yield chunk.choices[0].text if hasattr(chunk.choices[0], 'text') and \
chunk.choices[0].text is not None else ""
            # We must return the generator object rather than yielding directly here.
            # Otherwise, the function itself would become a generator regardless of whether stream is True or False.
return generator()
else:
# get first element because prompt is single.
return response.choices[0].text
def chat(
self,
prompt: PromptTemplate,
# for AOAI, deployment name is customized by user, not model name.
deployment_name: str,
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
stop: list = None,
max_tokens: int = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
logit_bias: dict = {},
user: str = "",
# function_call can be of type str or dict.
function_call: object = None,
functions: list = None,
response_format: object = None,
seed: int = None,
**kwargs,
) -> [str, dict]:
# keep_trailing_newline=True is to keep the last \n in the prompt to avoid converting "user:\t\n" to "user:".
chat_str = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
messages = parse_chat(chat_str)
# TODO: remove below type conversion after client can pass json rather than string.
stream = to_bool(stream)
params = {
"model": deployment_name,
"messages": messages,
"temperature": float(temperature),
"top_p": float(top_p),
"n": int(n),
"stream": stream,
"presence_penalty": float(presence_penalty),
"frequency_penalty": float(frequency_penalty),
"user": user,
"extra_headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}
}
if functions is not None:
validate_functions(functions)
params["functions"] = functions
params["function_call"] = process_function_call(function_call)
# to avoid vision model validation error for empty param values.
if stop:
params["stop"] = stop
if max_tokens is not None and str(max_tokens).lower() != "inf":
params["max_tokens"] = int(max_tokens)
if logit_bias:
params["logit_bias"] = logit_bias
if response_format:
params["response_format"] = response_format
if seed is not None:
params["seed"] = seed
completion = self._client.chat.completions.create(**params)
return post_process_chat_api_response(completion, stream, functions)
def completion(
connection: AzureOpenAIConnection,
prompt: PromptTemplate,
deployment_name: str,
suffix: str = None,
max_tokens: int = 16,
temperature: float = 1.0,
top_p: float = 1,
n: int = 1,
stream: bool = False,
logprobs: int = None,
echo: bool = False,
stop: list = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
best_of: int = 1,
logit_bias: dict = {},
user: str = "",
**kwargs,
) -> str:
return AzureOpenAI(connection).completion(
prompt=prompt,
deployment_name=deployment_name,
suffix=suffix,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
n=n,
stream=stream,
logprobs=logprobs,
echo=echo,
stop=stop if stop else None,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
best_of=best_of,
logit_bias=logit_bias,
user=user,
**kwargs,
) | null |
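A minimal usage sketch of the module-level `completion` wrapper, assuming an existing Azure OpenAI deployment; all values are placeholders.
conn = AzureOpenAIConnection(api_key="<key>",  # placeholder
                             api_base="https://<resource>.openai.azure.com")
text = completion(conn, prompt=PromptTemplate("Say hi"),
                  deployment_name="my-davinci-deployment",  # placeholder deployment
                  max_tokens=8)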
3,953 | import json
from promptflow.tools.common import render_jinja_template, handle_openai_error, parse_chat, to_bool, \
validate_functions, process_function_call, post_process_chat_api_response, init_azure_openai_client
from promptflow._internal import enable_cache, ToolProvider, tool, register_apis
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.types import PromptTemplate
class AzureOpenAI(ToolProvider):
def __init__(self, connection: AzureOpenAIConnection):
super().__init__()
self.connection = connection
self._client = init_azure_openai_client(connection)
def calculate_cache_string_for_completion(
self,
**kwargs,
) -> str:
d = dict(self.connection)
d.pop("api_key")
d.update({**kwargs})
return json.dumps(d)
def completion(
self,
prompt: PromptTemplate,
# for AOAI, deployment name is customized by user, not model name.
deployment_name: str,
suffix: str = None,
max_tokens: int = 16,
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
logprobs: int = None,
echo: bool = False,
stop: list = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
best_of: int = 1,
logit_bias: dict = {},
user: str = "",
**kwargs,
) -> str:
prompt = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
# TODO: remove below type conversion after client can pass json rather than string.
echo = to_bool(echo)
stream = to_bool(stream)
response = self._client.completions.create(
prompt=prompt,
model=deployment_name,
# empty string suffix should be treated as None.
suffix=suffix if suffix else None,
max_tokens=int(max_tokens),
temperature=float(temperature),
top_p=float(top_p),
n=int(n),
stream=stream,
# TODO: remove below type conversion after client pass json rather than string.
# empty string will go to else branch, but original api cannot accept empty
# string, must be None.
logprobs=int(logprobs) if logprobs else None,
echo=echo,
# fix bug "[] is not valid under any of the given schemas-'stop'"
stop=stop if stop else None,
presence_penalty=float(presence_penalty),
frequency_penalty=float(frequency_penalty),
best_of=int(best_of),
# Logit bias must be a dict if we passed it to openai api.
logit_bias=logit_bias if logit_bias else {},
user=user,
extra_headers={"ms-azure-ai-promptflow-called-from": "aoai-tool"})
if stream:
def generator():
for chunk in response:
if chunk.choices:
yield chunk.choices[0].text if hasattr(chunk.choices[0], 'text') and \
chunk.choices[0].text is not None else ""
            # We must return the generator object rather than yielding directly here.
            # Otherwise, the function itself would become a generator regardless of whether stream is True or False.
return generator()
else:
# get first element because prompt is single.
return response.choices[0].text
def chat(
self,
prompt: PromptTemplate,
# for AOAI, deployment name is customized by user, not model name.
deployment_name: str,
temperature: float = 1.0,
top_p: float = 1.0,
n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
stream: bool = False,
stop: list = None,
max_tokens: int = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
logit_bias: dict = {},
user: str = "",
# function_call can be of type str or dict.
function_call: object = None,
functions: list = None,
response_format: object = None,
seed: int = None,
**kwargs,
) -> [str, dict]:
# keep_trailing_newline=True is to keep the last \n in the prompt to avoid converting "user:\t\n" to "user:".
chat_str = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
messages = parse_chat(chat_str)
# TODO: remove below type conversion after client can pass json rather than string.
stream = to_bool(stream)
params = {
"model": deployment_name,
"messages": messages,
"temperature": float(temperature),
"top_p": float(top_p),
"n": int(n),
"stream": stream,
"presence_penalty": float(presence_penalty),
"frequency_penalty": float(frequency_penalty),
"user": user,
"extra_headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}
}
if functions is not None:
validate_functions(functions)
params["functions"] = functions
params["function_call"] = process_function_call(function_call)
# to avoid vision model validation error for empty param values.
if stop:
params["stop"] = stop
if max_tokens is not None and str(max_tokens).lower() != "inf":
params["max_tokens"] = int(max_tokens)
if logit_bias:
params["logit_bias"] = logit_bias
if response_format:
params["response_format"] = response_format
if seed is not None:
params["seed"] = seed
completion = self._client.chat.completions.create(**params)
return post_process_chat_api_response(completion, stream, functions)
def chat(
connection: AzureOpenAIConnection,
prompt: PromptTemplate,
deployment_name: str,
temperature: float = 1,
top_p: float = 1,
n: int = 1,
stream: bool = False,
stop: list = None,
max_tokens: int = None,
presence_penalty: float = 0,
frequency_penalty: float = 0,
logit_bias: dict = {},
user: str = "",
function_call: object = None,
functions: list = None,
response_format: object = None,
seed: int = None,
**kwargs,
) -> str:
return AzureOpenAI(connection).chat(
prompt=prompt,
deployment_name=deployment_name,
temperature=temperature,
top_p=top_p,
n=n,
stream=stream,
stop=stop if stop else None,
max_tokens=max_tokens,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
logit_bias=logit_bias,
user=user,
function_call=function_call,
functions=functions,
response_format=response_format,
seed=seed,
**kwargs,
) | null |
3,954 | import functools
import json
import os
import re
import requests
import sys
import time
import tempfile
from abc import abstractmethod
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Tuple, Optional, Union
from promptflow._core.tool import ToolProvider, tool
from promptflow._sdk._constants import ConnectionType
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
from promptflow.tools.common import render_jinja_template, validate_role
from promptflow.tools.exception import (
OpenModelLLMOnlineEndpointError,
OpenModelLLMUserError,
OpenModelLLMKeyValidationError,
ChatAPIInvalidRole
)
class OpenModelLLMOnlineEndpointError(UserErrorException):
"""Base exception raised when the call to an online endpoint failed."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
def handle_online_endpoint_error(max_retries: int = 5,
initial_delay: float = 2,
exponential_base: float = 3):
def deco_retry(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
delay = initial_delay
for i in range(max_retries):
try:
return func(*args, **kwargs)
except OpenModelLLMOnlineEndpointError as e:
if i == max_retries - 1:
error_message = f"Exception hit calling Online Endpoint: {type(e).__name__}: {str(e)}"
print(error_message, file=sys.stderr)
raise OpenModelLLMOnlineEndpointError(message=error_message)
delay *= exponential_base
time.sleep(delay)
return wrapper
return deco_retry | null |
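A minimal usage sketch of the retry decorator; the decorated endpoint call and its failure condition are illustrative.
@handle_online_endpoint_error(max_retries=3, initial_delay=1, exponential_base=2)
def call_endpoint(url: str, body: dict) -> dict:
    response = requests.post(url, json=body, timeout=30)
    if response.status_code >= 500:
        # Retriable failure: the decorator backs off and calls again.
        raise OpenModelLLMOnlineEndpointError(message=f"HTTP {response.status_code}")
    return response.json()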
3,955 | import functools
import json
import os
import re
import requests
import sys
import time
import tempfile
from abc import abstractmethod
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Tuple, Optional, Union
from promptflow._core.tool import ToolProvider, tool
from promptflow._sdk._constants import ConnectionType
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
from promptflow.tools.common import render_jinja_template, validate_role
from promptflow.tools.exception import (
OpenModelLLMOnlineEndpointError,
OpenModelLLMUserError,
OpenModelLLMKeyValidationError,
ChatAPIInvalidRole
)
def is_serverless_endpoint(endpoint_url: str) -> bool:
return "serverless.ml.azure.com" in endpoint_url or "inference.ai.azure.com" in endpoint_url | null |
3,956 | import functools
import json
import os
import re
import requests
import sys
import time
import tempfile
from abc import abstractmethod
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Tuple, Optional, Union
from promptflow._core.tool import ToolProvider, tool
from promptflow._sdk._constants import ConnectionType
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
from promptflow.tools.common import render_jinja_template, validate_role
from promptflow.tools.exception import (
OpenModelLLMOnlineEndpointError,
OpenModelLLMUserError,
OpenModelLLMKeyValidationError,
ChatAPIInvalidRole
)
def try_get_from_dict(some_dict: Dict, key_list: List):
for key in key_list:
if some_dict is None:
return some_dict
elif key in some_dict:
some_dict = some_dict[key]
else:
return None
return some_dict | null |
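A small runnable example of `try_get_from_dict` walking a nested payload safely:
payload = {"choices": {"first": {"text": "hello"}}}
print(try_get_from_dict(payload, ["choices", "first", "text"]))   # "hello"
print(try_get_from_dict(payload, ["choices", "second", "text"]))  # None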
3,957 | import functools
import json
import os
import re
import requests
import sys
import time
import tempfile
from abc import abstractmethod
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Tuple, Optional, Union
from promptflow._core.tool import ToolProvider, tool
from promptflow._sdk._constants import ConnectionType
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
from promptflow.tools.common import render_jinja_template, validate_role
from promptflow.tools.exception import (
OpenModelLLMOnlineEndpointError,
OpenModelLLMUserError,
OpenModelLLMKeyValidationError,
ChatAPIInvalidRole
)
CONNECTION_CACHE_FILE = "pf_connection_names"
class ConnectionCache:
def __init__(self,
use_until: datetime,
subscription_id: str,
resource_group: str,
workspace_name: str,
connection_names: List[str]):
self.use_until = use_until
self.subscription_id = subscription_id
self.resource_group = resource_group
self.workspace_name = workspace_name
self.connection_names = connection_names
    @classmethod
    def from_filename(cls, file):
        cache = json.load(file)
        return cls(cache['use_until'],
                   cache['subscription_id'],
                   cache['resource_group'],
                   cache['workspace_name'],
                   cache['connection_names'])
def can_use(self,
subscription_id: str,
resource_group: str,
workspace_name: str):
use_until_time = datetime.fromisoformat(self.use_until)
return (use_until_time > datetime.now()
and self.subscription_id == subscription_id
and self.resource_group == resource_group
and self.workspace_name == workspace_name)
ENDPOINT_CONTAINER = EndpointsContainer()
CUSTOM_CONNECTION_CONTAINER = CustomConnectionsContainer()
SERVERLESS_ENDPOINT_CONTAINER = ServerlessEndpointsContainer()
def list_endpoint_names(subscription_id: str = None,
resource_group_name: str = None,
workspace_name: str = None,
return_endpoint_url: bool = False,
force_refresh: bool = False) -> List[Dict[str, Union[str, int, float, list, Dict]]]:
# return an empty list if workspace triad is not available.
if not subscription_id or not resource_group_name or not workspace_name:
return []
cache_file_path = None
try:
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
cache_file_path = os.path.join(os.path.dirname(temp_file.name), CONNECTION_CACHE_FILE)
print(f"Attempting to read connection cache. File path: {cache_file_path}", file=sys.stdout)
if force_refresh:
print("....skipping. force_refresh is True", file=sys.stdout)
else:
with open(cache_file_path, 'r') as file:
cache = ConnectionCache.from_filename(file)
if cache.can_use(subscription_id, resource_group_name, workspace_name):
if len(cache.connection_names) > 0:
print("....using Connection Cache File", file=sys.stdout)
return cache.connection_names
else:
print("....skipping. No connections in file", file=sys.stdout)
else:
print("....skipping. File not relevant", file=sys.stdout)
except Exception as e:
print(f"....failed to find\\read connection cache file. Regenerating. Error:{e}", file=sys.stdout)
try:
from azure.identity import DefaultAzureCredential
credential = DefaultAzureCredential(exclude_interactive_browser_credential=False)
token = credential.get_token("https://management.azure.com/.default").token
except Exception as e:
print(f"Skipping list_endpoint_names. Exception: {e}", file=sys.stderr)
msg = "Exception getting token: Please retry"
return [{"value": msg, "display_value": msg, "description": msg}]
serverless_endpoints = SERVERLESS_ENDPOINT_CONTAINER.list_serverless_endpoints(token,
subscription_id,
resource_group_name,
workspace_name,
return_endpoint_url)
online_endpoints = ENDPOINT_CONTAINER.list_endpoint_names(credential,
subscription_id,
resource_group_name,
workspace_name,
return_endpoint_url)
custom_connections = CUSTOM_CONNECTION_CONTAINER.list_custom_connection_names(credential,
subscription_id,
resource_group_name,
workspace_name,
return_endpoint_url)
list_of_endpoints = custom_connections + serverless_endpoints + online_endpoints
cache = ConnectionCache(use_until=(datetime.now() + timedelta(minutes=5)).isoformat(),
subscription_id=subscription_id,
resource_group=resource_group_name,
workspace_name=workspace_name,
connection_names=list_of_endpoints)
if len(list_of_endpoints) == 0:
msg = "No endpoints found. Please add a connection."
return [{"value": msg, "display_value": msg, "description": msg}]
if cache_file_path is not None:
try:
print(f"Attempting to write connection cache. File path: {cache_file_path}", file=sys.stdout)
with open(cache_file_path, 'w') as file:
json.dump(cache, file, default=lambda obj: obj.__dict__)
print("....written", file=sys.stdout)
except Exception as e:
print(f"""....failed to write connection cache file. Will need to reload next time.
Error:{e}""", file=sys.stdout)
return list_of_endpoints | null |
3,958 | import functools
import json
import os
import re
import requests
import sys
import time
import tempfile
from abc import abstractmethod
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Tuple, Optional, Union
from promptflow._core.tool import ToolProvider, tool
from promptflow._sdk._constants import ConnectionType
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
from promptflow.tools.common import render_jinja_template, validate_role
from promptflow.tools.exception import (
OpenModelLLMOnlineEndpointError,
OpenModelLLMUserError,
OpenModelLLMKeyValidationError,
ChatAPIInvalidRole
)
DEPLOYMENT_DEFAULT = "default"
ENDPOINT_CONTAINER = EndpointsContainer()
def parse_endpoint_connection_type(endpoint_connection_name: str) -> Tuple[str, str]:
endpoint_connection_details = endpoint_connection_name.split("/")
return (endpoint_connection_details[0].lower(), endpoint_connection_details[1])
def list_deployment_names(subscription_id: str = None,
resource_group_name: str = None,
workspace_name: str = None,
endpoint: str = None) -> List[Dict[str, Union[str, int, float, list, Dict]]]:
# return an empty list if workspace triad is not available.
if not subscription_id or not resource_group_name or not workspace_name:
return []
deployment_default_list = [{
"value": DEPLOYMENT_DEFAULT,
"display_value": DEPLOYMENT_DEFAULT,
"description": "This will use the default deployment for the selected online endpoint."
+ "You can also manually enter a deployment name here."
}]
if endpoint is None or endpoint.strip() == "" or "/" not in endpoint:
return deployment_default_list
(endpoint_connection_type, endpoint_connection_name) = parse_endpoint_connection_type(endpoint)
if endpoint_connection_type != "onlineendpoint":
return deployment_default_list
try:
from azure.identity import DefaultAzureCredential
credential = DefaultAzureCredential(exclude_interactive_browser_credential=False)
except Exception as e:
print(f"Skipping list_deployment_names. Exception: {e}", file=sys.stderr)
return deployment_default_list
return deployment_default_list + ENDPOINT_CONTAINER.list_deployment_names(
credential,
subscription_id,
resource_group_name,
workspace_name,
endpoint_connection_name
) | null |
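A small runnable example of `parse_endpoint_connection_type`:
kind, name = parse_endpoint_connection_type("onlineEndpoint/my-llama-endpoint")
print(kind, name)  # onlineendpoint my-llama-endpoint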
3,959 | import functools
import json
import os
import re
import requests
import sys
import time
import tempfile
from abc import abstractmethod
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Tuple, Optional, Union
from promptflow._core.tool import ToolProvider, tool
from promptflow._sdk._constants import ConnectionType
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
from promptflow.tools.common import render_jinja_template, validate_role
from promptflow.tools.exception import (
OpenModelLLMOnlineEndpointError,
OpenModelLLMUserError,
OpenModelLLMKeyValidationError,
ChatAPIInvalidRole
)
class ModelFamily(str, Enum):
LLAMA = "LLaMa"
DOLLY = "Dolly"
GPT2 = "GPT-2"
FALCON = "Falcon"
    @classmethod
    def _missing_(cls, value):
value = value.lower()
for member in cls:
if member.lower() == value:
return member
return None
def get_model_type(deployment_model: str) -> str:
m = re.match(r'azureml://registries/[^/]+/models/([^/]+)/versions/', deployment_model)
if m is None:
print(f"Unexpected model format: {deployment_model}. Skipping", file=sys.stdout)
return None
model = m[1].lower()
if model.startswith("llama-2"):
return ModelFamily.LLAMA
elif model.startswith("tiiuae-falcon"):
return ModelFamily.FALCON
elif model.startswith("databricks-dolly-v2"):
return ModelFamily.DOLLY
elif model.startswith("gpt2"):
return ModelFamily.GPT2
else:
# Not found and\or handled. Ignore this endpoint\deployment
print(f"Unexpected model type: {model} derived from deployed model: {deployment_model}")
return None | null |
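A small example of `get_model_type`; the registry model id is illustrative:
model_id = "azureml://registries/azureml/models/Llama-2-7b-chat/versions/14"
print(get_model_type(model_id))  # ModelFamily.LLAMA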
3,960 | import functools
import json
import os
import re
import requests
import sys
import time
import tempfile
from abc import abstractmethod
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Tuple, Optional, Union
from promptflow._core.tool import ToolProvider, tool
from promptflow._sdk._constants import ConnectionType
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
from promptflow.tools.common import render_jinja_template, validate_role
from promptflow.tools.exception import (
OpenModelLLMOnlineEndpointError,
OpenModelLLMUserError,
OpenModelLLMKeyValidationError,
ChatAPIInvalidRole
)
class ModelFamily(str, Enum):
LLAMA = "LLaMa"
DOLLY = "Dolly"
GPT2 = "GPT-2"
FALCON = "Falcon"
    @classmethod
    def _missing_(cls, value):
value = value.lower()
for member in cls:
if member.lower() == value:
return member
return None
class OpenModelLLMKeyValidationError(ToolValidationError):
"""Base exception raised when failed to validate functions when call chat api."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def validate_model_family(model_family: str):
try:
return ModelFamily[model_family]
except KeyError:
accepted_models = ",".join([model.name for model in ModelFamily])
raise OpenModelLLMKeyValidationError(
message=f"""Given model_family '{model_family}' not recognized.
Supported models are: {accepted_models}."""
) | null |
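A small example of `validate_model_family`; lookup is by enum member name:
print(validate_model_family("LLAMA"))    # ModelFamily.LLAMA
# validate_model_family("not-a-model")   # would raise OpenModelLLMKeyValidationError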
3,961 | from typing import List, Dict
from promptflow.tools.common import render_jinja_template, handle_openai_error, parse_chat, \
preprocess_template_string, find_referenced_image_set, convert_to_chat_list, init_azure_openai_client, \
post_process_chat_api_response, list_deployment_connections, build_deployment_dict, GPT4V_VERSION
from promptflow._internal import ToolProvider, tool
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.types import PromptTemplate
GPT4V_VERSION = "vision-preview"
def build_deployment_dict(item) -> Deployment:
model = item.properties.model
return Deployment(item.name, model.name, model.version)
def list_deployment_connections(
subscription_id,
resource_group_name,
workspace_name,
connection="",
):
try:
# Do not support dynamic list for local.
from azure.mgmt.cognitiveservices import CognitiveServicesManagementClient
from promptflow.azure.operations._arm_connection_operations import \
ArmConnectionOperations, OpenURLFailedUserError
except ImportError:
return None
# For local, subscription_id is None. Does not support dynamic list for local.
if not subscription_id:
return None
try:
credential = _get_credential()
try:
# Currently, the param 'connection' is str, not AzureOpenAIConnection type.
conn = ArmConnectionOperations._build_connection_dict(
name=connection,
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
credential=credential
)
resource_id = conn.get("value").get('resource_id', "")
if not resource_id:
return None
conn_sub, conn_rg, conn_account = _parse_resource_id(resource_id)
except OpenURLFailedUserError:
return None
except ListDeploymentsError as e:
raise e
except Exception as e:
msg = f"Parsing connection with exception: {e}"
raise ListDeploymentsError(msg=msg) from e
client = CognitiveServicesManagementClient(
credential=credential,
subscription_id=conn_sub,
)
return client.deployments.list(
resource_group_name=conn_rg,
account_name=conn_account,
)
except Exception as e:
if hasattr(e, 'status_code') and e.status_code == 403:
msg = f"Failed to list deployments due to permission issue: {e}"
raise ListDeploymentsError(msg=msg) from e
else:
msg = f"Failed to list deployments with exception: {e}"
raise ListDeploymentsError(msg=msg) from e
def list_deployment_names(
subscription_id,
resource_group_name,
workspace_name,
connection=""
) -> List[Dict[str, str]]:
res = []
deployment_collection = list_deployment_connections(subscription_id, resource_group_name, workspace_name,
connection)
if not deployment_collection:
return res
for item in deployment_collection:
deployment = build_deployment_dict(item)
if deployment.version == GPT4V_VERSION:
cur_item = {
"value": deployment.name,
"display_value": deployment.name,
}
res.append(cur_item)
return res | null |
3,962 | import functools
import json
import os
import re
import sys
import time
from typing import List, Mapping
from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError, BadRequestError
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType, ListDeploymentsError, ParseConnectionError
from promptflow._cli._utils import get_workspace_triad_from_local
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import SystemErrorException, UserErrorException
class ChatAPIInvalidFunctions(ToolValidationError):
"""Base exception raised when failed to validate functions when call chat api."""
pass
def validate_functions(functions):
function_example = json.dumps({
"name": "function_name",
"parameters": {
"type": "object",
"properties": {
"parameter_name": {
"type": "integer",
"description": "parameter_description"
}
}
},
"description": "function_description"
})
common_tsg = f"Here is a valid function example: {function_example}. See more details at " \
"https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions " \
"or view sample 'How to use functions with chat models' in our gallery."
if len(functions) == 0:
raise ChatAPIInvalidFunctions(message=f"functions cannot be an empty list. {common_tsg}")
else:
for i, function in enumerate(functions):
# validate if the function is a dict
if not isinstance(function, dict):
raise ChatAPIInvalidFunctions(message=f"function {i} '{function}' is not a dict. {common_tsg}")
# validate if has required keys
for key in ["name", "parameters"]:
if key not in function.keys():
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function}' does not have '{key}' property. {common_tsg}")
# validate if the parameters is a dict
if not isinstance(function["parameters"], dict):
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters '{function['parameters']}' "
f"should be described as a JSON Schema object. {common_tsg}")
# validate if the parameters has required keys
for key in ["type", "properties"]:
if key not in function["parameters"].keys():
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters '{function['parameters']}' "
f"does not have '{key}' property. {common_tsg}")
# validate if the parameters type is object
if function["parameters"]["type"] != "object":
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters 'type' "
f"should be 'object'. {common_tsg}")
# validate if the parameters properties is a dict
if not isinstance(function["parameters"]["properties"], dict):
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters 'properties' "
f"should be described as a JSON Schema object. {common_tsg}") | null |
3,963 | import functools
import json
import os
import re
import sys
import time
from typing import List, Mapping
from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError, BadRequestError
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType, ListDeploymentsError, ParseConnectionError
from promptflow._cli._utils import get_workspace_triad_from_local
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import SystemErrorException, UserErrorException
def validate_role(role: str, valid_roles: List[str] = None):
if not valid_roles:
valid_roles = ["assistant", "function", "user", "system"]
if role not in valid_roles:
valid_roles_str = ','.join([f'\'{role}:\\n\'' for role in valid_roles])
error_message = (
f"The Chat API requires a specific format for prompt definition, and the prompt should include separate "
f"lines as role delimiters: {valid_roles_str}. Current parsed role '{role}'"
f" does not meet the requirement. If you intend to use the Completion API, please select the appropriate"
f" API type and deployment name. If you do intend to use the Chat API, please refer to the guideline at "
f"https://aka.ms/pfdoc/chat-prompt or view the samples in our gallery that contain 'Chat' in the name."
)
raise ChatAPIInvalidRole(message=error_message)
def try_parse_name_and_content(role_prompt):
# customer can add ## in front of name/content for markdown highlight.
# and we still support name/content without ## prefix for backward compatibility.
pattern = r"\n*#{0,2}\s*name:\n+\s*(\S+)\s*\n*#{0,2}\s*content:\n?(.*)"
match = re.search(pattern, role_prompt, re.DOTALL)
if match:
return match.group(1), match.group(2)
return None
def to_content_str_or_list(chat_str: str, hash2images: Mapping):
chat_str = chat_str.strip()
chunks = chat_str.split("\n")
include_image = False
result = []
for chunk in chunks:
if chunk.strip() in hash2images:
image_message = {}
image_message["type"] = "image_url"
image_url = hash2images[chunk.strip()].source_url \
if hasattr(hash2images[chunk.strip()], "source_url") else None
if not image_url:
image_bs64 = hash2images[chunk.strip()].to_base64()
                image_mime_type = hash2images[chunk.strip()]._mime_type
                image_url = {"url": f"data:{image_mime_type};base64,{image_bs64}"}
image_message["image_url"] = image_url
result.append(image_message)
include_image = True
elif chunk.strip() == "":
continue
else:
result.append({"type": "text", "text": chunk})
return result if include_image else chat_str
class ChatAPIFunctionRoleInvalidFormat(ToolValidationError):
"""Base exception raised when failed to validate chat api function role format."""
pass
def parse_chat(chat_str, images: List = None, valid_roles: List[str] = None):
if not valid_roles:
valid_roles = ["system", "user", "assistant", "function"]
# openai chat api only supports below roles.
# customer can add single # in front of role name for markdown highlight.
# and we still support role name without # prefix for backward compatibility.
separator = r"(?i)^\s*#?\s*(" + "|".join(valid_roles) + r")\s*:\s*\n"
images = images or []
hash2images = {str(x): x for x in images}
chunks = re.split(separator, chat_str, flags=re.MULTILINE)
chat_list = []
for chunk in chunks:
last_message = chat_list[-1] if len(chat_list) > 0 else None
if last_message and "role" in last_message and "content" not in last_message:
parsed_result = try_parse_name_and_content(chunk)
if parsed_result is None:
# "name" is required if the role is "function"
if last_message["role"] == "function":
raise ChatAPIFunctionRoleInvalidFormat(
message="Failed to parse function role prompt. Please make sure the prompt follows the "
"format: 'name:\\nfunction_name\\ncontent:\\nfunction_content'. "
"'name' is required if role is function, and it should be the name of the function "
"whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, "
"with a maximum length of 64 characters. See more details in "
"https://platform.openai.com/docs/api-reference/chat/create#chat/create-name "
"or view sample 'How to use functions with chat models' in our gallery.")
# "name" is optional for other role types.
else:
last_message["content"] = to_content_str_or_list(chunk, hash2images)
else:
last_message["name"] = parsed_result[0]
last_message["content"] = to_content_str_or_list(parsed_result[1], hash2images)
else:
if chunk.strip() == "":
continue
# Check if prompt follows chat api message format and has valid role.
# References: https://platform.openai.com/docs/api-reference/chat/create.
role = chunk.strip().lower()
validate_role(role, valid_roles=valid_roles)
new_message = {"role": role}
chat_list.append(new_message)
return chat_list | null |
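A small runnable example of `parse_chat` on a two-turn prompt:
chat_str = "system:\nYou are terse.\nuser:\nWhat is 2+2?"
print(parse_chat(chat_str))
# [{'role': 'system', 'content': 'You are terse.'},
#  {'role': 'user', 'content': 'What is 2+2?'}]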
3,964 | import functools
import json
import os
import re
import sys
import time
from typing import List, Mapping
from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError, BadRequestError
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType, ListDeploymentsError, ParseConnectionError
from promptflow._cli._utils import get_workspace_triad_from_local
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException
def generate_retry_interval(retry_count: int) -> float:
min_backoff_in_sec = 3
max_backoff_in_sec = 60
retry_interval = min_backoff_in_sec + ((2 ** retry_count) - 1)
if retry_interval > max_backoff_in_sec:
retry_interval = max_backoff_in_sec
return retry_interval
def refine_extra_fields_not_permitted_error(connection, deployment_name, model):
tsg = "Please kindly avoid using vision model in LLM tool, " \
"because vision model cannot work with some chat api parameters. " \
"You can change to use tool 'Azure OpenAI GPT-4 Turbo with Vision' " \
"or 'OpenAI GPT-4V' for vision model."
try:
if isinstance(connection, AzureOpenAIConnection):
subscription_id, resource_group, workspace_name = get_workspace_triad()
if subscription_id and resource_group and workspace_name:
deployment_collection = list_deployment_connections(subscription_id, resource_group, workspace_name,
connection.name)
for item in deployment_collection:
if deployment_name == item.name:
if item.properties.model.version in [GPT4V_VERSION]:
return tsg
elif isinstance(connection, OpenAIConnection) and model in ["gpt-4-vision-preview"]:
return tsg
except Exception as e:
print(f"Exception occurs when refine extra fields not permitted error for llm: "
f"{type(e).__name__}: {str(e)}", file=sys.stderr)
return None
class WrappedOpenAIError(UserErrorException):
"""Refine error messages on top of native openai errors."""
def __init__(self, ex: OpenAIError, **kwargs):
self._ex = ex
super().__init__(target=ErrorTarget.TOOL, **kwargs)
    @property
    def message(self):
return str(to_openai_error_message(self._ex))
    @property
    def error_codes(self):
"""The hierarchy of the error codes.
We follow the "Microsoft REST API Guidelines" to define error codes in a hierarchy style.
See the below link for details:
https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses
This list will be converted into an error code hierarchy by the prompt flow framework.
For this case, it will be converted into a data structure that equivalent to:
{
"code": "UserError",
"innerError": {
"code": "OpenAIError",
"innerError": {
"code": self._ex.__class__.__name__,
"innerError": None
}
}
}
"""
return ["UserError", "OpenAIError", self._ex.__class__.__name__]
class ExceedMaxRetryTimes(WrappedOpenAIError):
"""Base exception raised when retry exceeds max times."""
    @property
    def message(self):
return "Exceed max retry times. " + super().message
class LLMError(UserErrorException):
"""Base exception raised when failed to call openai api with non-OpenAIError."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
The provided code snippet includes necessary dependencies for implementing the `handle_openai_error` function. Write a Python function `def handle_openai_error(tries: int = 100)` to solve the following problem:
A decorator function used to handle OpenAI errors. OpenAI errors fall into retriable and non-retriable ones. For retriable errors, the decorator uses the parameters below to control its retry behavior with exponential backoff: `tries`: max times for the function invocation, type is int; `delay`: base delay seconds for exponential delay, type is float
Here is the function:
def handle_openai_error(tries: int = 100):
"""
    A decorator function used to handle OpenAI errors.
    OpenAI errors fall into retriable and non-retriable ones.
    For retriable errors, the decorator uses the parameters below to control its retry behavior
    with exponential backoff:
    `tries`: max times for the function invocation, type is int
    `delay`: base delay seconds for exponential delay, type is float
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
for i in range(tries + 1):
try:
return func(*args, **kwargs)
except (SystemErrorException, UserErrorException) as e:
# Throw inner wrapped exception directly
raise e
except (APIStatusError, APIConnectionError) as e:
# Handle retriable exception, please refer to
# https://platform.openai.com/docs/guides/error-codes/api-errors
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
# Vision model does not support all chat api parameters, e.g. response_format and function_call.
# Recommend user to use vision model in vision tools, rather than LLM tool.
# Related issue https://github.com/microsoft/promptflow/issues/1683
if isinstance(e, BadRequestError) and "extra fields not permitted" in str(e).lower():
refined_error_message = \
refine_extra_fields_not_permitted_error(args[0].connection,
kwargs.get("deployment_name", ""),
kwargs.get("model", ""))
if refined_error_message:
raise LLMError(message=f"{str(e)} {refined_error_message}")
else:
raise WrappedOpenAIError(e)
if isinstance(e, APIConnectionError) and not isinstance(e, APITimeoutError) \
and "connection aborted" not in str(e).lower():
raise WrappedOpenAIError(e)
# Retry InternalServerError(>=500), RateLimitError(429), UnprocessableEntityError(422)
if isinstance(e, APIStatusError):
status_code = e.response.status_code
if status_code < 500 and status_code not in [429, 422]:
raise WrappedOpenAIError(e)
if isinstance(e, RateLimitError) and getattr(e, "type", None) == "insufficient_quota":
# Exit retry if this is quota insufficient error
print(f"{type(e).__name__} with insufficient quota. Throw user error.", file=sys.stderr)
raise WrappedOpenAIError(e)
if i == tries:
# Exit retry if max retry reached
print(f"{type(e).__name__} reached max retry. Exit retry with user error.", file=sys.stderr)
raise ExceedMaxRetryTimes(e)
if hasattr(e, 'response') and e.response is not None:
retry_after_in_header = e.response.headers.get("retry-after", None)
else:
retry_after_in_header = None
if not retry_after_in_header:
retry_after_seconds = generate_retry_interval(i)
msg = (
f"{type(e).__name__} #{i}, but no Retry-After header, "
+ f"Back off {retry_after_seconds} seconds for retry."
)
print(msg, file=sys.stderr)
else:
retry_after_seconds = float(retry_after_in_header)
msg = (
f"{type(e).__name__} #{i}, Retry-After={retry_after_in_header}, "
f"Back off {retry_after_seconds} seconds for retry."
)
print(msg, file=sys.stderr)
time.sleep(retry_after_seconds)
except OpenAIError as e:
# For other non-retriable errors from OpenAIError,
# For example, AuthenticationError, APIConnectionError, BadRequestError, NotFoundError
# Mark UserError for all the non-retriable OpenAIError
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
raise WrappedOpenAIError(e)
except Exception as e:
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
error_message = f"OpenAI API hits exception: {type(e).__name__}: {str(e)}"
raise LLMError(message=error_message)
return wrapper
return decorator | A decorator function that used to handle OpenAI error. OpenAI Error falls into retriable vs non-retriable ones. For retriable error, the decorator use below parameters to control its retry activity with exponential backoff: `tries` : max times for the function invocation, type is int 'delay': base delay seconds for exponential delay, type is float |
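A minimal usage sketch (illustrative only; call_chat_api and its arguments are hypothetical, not from the original snippet):
@handle_openai_error(tries=3)
def call_chat_api(client, **chat_kwargs):
    # hypothetical wrapper: retries 429/422/5xx and aborted connections with backoff,
    # and surfaces other OpenAI errors as WrappedOpenAIError
    return client.chat.completions.create(**chat_kwargs)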
3,965 | import functools
import json
import os
import re
import sys
import time
from typing import List, Mapping
from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError, BadRequestError
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType, ListDeploymentsError, ParseConnectionError
from promptflow._cli._utils import get_workspace_triad_from_local
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import SystemErrorException, UserErrorException
def to_bool(value) -> bool:
return str(value).lower() == "true" | null |
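Illustrative behavior (not part of the original snippet): only the case-insensitive string "true" maps to True.
# to_bool("True") -> True, to_bool("FALSE") -> False, to_bool(True) -> True
# gotcha: to_bool(1) -> False, because str(1).lower() == "1"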
3,966 | import functools
import json
import os
import re
import sys
import time
from typing import List, Mapping
from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError, BadRequestError
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType, ListDeploymentsError, ParseConnectionError
from promptflow._cli._utils import get_workspace_triad_from_local
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import SystemErrorException, UserErrorException
# ToolValidationError comes from promptflow.tools.exception in the source module
class ChatAPIInvalidFunctions(ToolValidationError):
    """Base exception raised when functions fail validation when calling the chat api."""
    pass
def process_function_call(function_call):
if function_call is None:
param = "auto"
elif function_call == "auto" or function_call == "none":
param = function_call
else:
function_call_example = json.dumps({"name": "function_name"})
common_tsg = f"Here is a valid example: {function_call_example}. See the guide at " \
"https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call " \
"or view sample 'How to call functions with chat models' in our gallery."
param = function_call
if not isinstance(param, dict):
raise ChatAPIInvalidFunctions(
message=f"function_call parameter '{param}' must be a dict, but not {type(function_call)}. {common_tsg}"
)
else:
if "name" not in function_call:
raise ChatAPIInvalidFunctions(
message=f'function_call parameter {json.dumps(param)} must contain "name" field. {common_tsg}'
)
return param | null |
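Illustrative calls (not part of the original snippet):
# process_function_call(None) -> "auto"
# process_function_call("none") -> "none"
# process_function_call({"name": "get_weather"}) -> {"name": "get_weather"}
# process_function_call("bad") raises ChatAPIInvalidFunctions (neither "auto"/"none" nor a dict)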
3,967 | import functools
import json
import os
import re
import sys
import time
from typing import List, Mapping
from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError, BadRequestError
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType, ListDeploymentsError, ParseConnectionError
from promptflow._cli._utils import get_workspace_triad_from_local
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import SystemErrorException, UserErrorException
# ToolValidationError comes from promptflow.tools.exception in the source module
class FunctionCallNotSupportedInStreamMode(ToolValidationError):
    """Base exception raised when the functions parameter is used in stream mode when calling the chat api."""
    pass
def post_process_chat_api_response(completion, stream, functions):
if stream:
if functions is not None:
error_message = "Function calling has not been supported by stream mode yet."
raise FunctionCallNotSupportedInStreamMode(message=error_message)
def generator():
for chunk in completion:
if chunk.choices:
yield chunk.choices[0].delta.content if hasattr(chunk.choices[0].delta, 'content') and \
chunk.choices[0].delta.content is not None else ""
        # We must return the generator object rather than using yield directly here.
        # Otherwise, the function itself would become a generator, regardless of whether stream is True or False.
return generator()
else:
        # When calling a function, the function_call response is returned as a field in message, so we need to
        # return the message directly. Otherwise, we only return content.
if functions is not None:
return completion.model_dump()["choices"][0]["message"]
else:
# chat api may return message with no content.
return getattr(completion.choices[0].message, "content", "") | null |
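A minimal sketch of the streaming path using stand-in chunk objects (SimpleNamespace mocks, not real OpenAI types):
from types import SimpleNamespace
chunks = [SimpleNamespace(choices=[SimpleNamespace(delta=SimpleNamespace(content="Hel"))]),
          SimpleNamespace(choices=[SimpleNamespace(delta=SimpleNamespace(content="lo"))])]
print("".join(post_process_chat_api_response(iter(chunks), stream=True, functions=None)))  # prints "Hello"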
3,968 | import functools
import json
import os
import re
import sys
import time
from typing import List, Mapping
from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError, BadRequestError
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType, ListDeploymentsError, ParseConnectionError
from promptflow._cli._utils import get_workspace_triad_from_local
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import SystemErrorException, UserErrorException
The provided code snippet includes necessary dependencies for implementing the `preprocess_template_string` function. Write a Python function `def preprocess_template_string(template_string: str) -> str` to solve the following problem:
Remove the image input decorator from the template string and place the image input in a new line.
Here is the function:
def preprocess_template_string(template_string: str) -> str:
"""Remove the image input decorator from the template string and place the image input in a new line."""
pattern = re.compile(r'\!\[(\s*image\s*)\]\(\{\{(\s*[^\s{}]+\s*)\}\}\)')
# Find all matches in the input string
matches = pattern.findall(template_string)
# Perform substitutions
for match in matches:
original = f"![{match[0]}]({{{{{match[1]}}}}})"
replacement = f"\n{{{{{match[1]}}}}}\n"
template_string = template_string.replace(original, replacement)
return template_string | Remove the image input decorator from the template string and place the image input in a new line. |
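Illustrative transformation (not part of the original snippet):
# preprocess_template_string("describe ![image]({{img}}) briefly")
# -> "describe \n{{img}}\n briefly"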
3,969 | import functools
import json
import os
import re
import sys
import time
from typing import List, Mapping
from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError, BadRequestError
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType, ListDeploymentsError, ParseConnectionError
from promptflow._cli._utils import get_workspace_triad_from_local
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import SystemErrorException, UserErrorException
class ChatInputList(list):
"""
ChatInputList is a list of ChatInput objects. It is used to override the __str__ method of list to return a string
that can be easily parsed as message list.
"""
def __init__(self, iterable=None):
super().__init__(iterable or [])
def __str__(self):
return "\n".join(map(str, self))
def convert_to_chat_list(obj):
if isinstance(obj, dict):
return {key: convert_to_chat_list(value) for key, value in obj.items()}
elif isinstance(obj, list):
return ChatInputList([convert_to_chat_list(item) for item in obj])
else:
return obj | null |
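Illustrative use (not part of the original snippet): nested lists stringify one item per line.
# str(convert_to_chat_list(["system:", ["You are", "helpful."]]))
# -> "system:\nYou are\nhelpful."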
3,970 | import functools
import json
import os
import re
import sys
import time
from typing import List, Mapping
from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError, BadRequestError
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType, ListDeploymentsError, ParseConnectionError
from promptflow._cli._utils import get_workspace_triad_from_local
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import SystemErrorException, UserErrorException
def add_referenced_images_to_set(value, image_set, image_type):
if isinstance(value, image_type):
image_set.add(value)
elif isinstance(value, list):
for item in value:
add_referenced_images_to_set(item, image_set, image_type)
elif isinstance(value, dict):
for _, item in value.items():
add_referenced_images_to_set(item, image_set, image_type)
def find_referenced_image_set(kwargs: dict):
referenced_images = set()
try:
from promptflow.contracts.multimedia import Image
for _, value in kwargs.items():
add_referenced_images_to_set(value, referenced_images, Image)
except ImportError:
pass
return referenced_images | null |
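A minimal sketch of the traversal with a stand-in image type (the real promptflow Image class is not required):
class FakeImage:  # stand-in for promptflow.contracts.multimedia.Image
    pass

img = FakeImage()
found = set()
add_referenced_images_to_set({"a": [img, "text"], "b": {"c": img}}, found, FakeImage)
print(found == {img})  # prints True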
3,971 | import json
import sys
from enum import Enum
import requests
from promptflow._internal import ToolProvider, tool
from promptflow.connections import SerpConnection
from promptflow.exceptions import PromptflowException
from promptflow.tools.exception import SerpAPIUserError, SerpAPISystemError
class SafeMode(str, Enum):
ACTIVE = "active"
OFF = "off"
class Engine(str, Enum):
GOOGLE = "google"
BING = "bing"
class SerpAPI(ToolProvider):
def __init__(self, connection: SerpConnection):
super().__init__()
self.connection = connection
def extract_error_message_from_json(self, error_data):
error_message = ""
        # For rejected requests, for example when the api_key is not valid
if "error" in error_data:
error_message = error_data["error"]
return str(error_message)
def safe_extract_error_message(self, response):
default_error_message = f"SerpAPI search request failed: {response.text}"
try:
# Keep the same style as SerpAPIClient
error_data = json.loads(response.text)
print(f"Response text json: {json.dumps(error_data)}", file=sys.stderr)
error_message = self.extract_error_message_from_json(error_data)
error_message = error_message if len(error_message) > 0 else default_error_message
return error_message
except Exception as e:
            # Swallow any exception while extracting the detailed error message
print(
f"Unexpected exception occurs while extract error message "
f"from response: {type(e).__name__}: {str(e)}",
file=sys.stderr,
)
return default_error_message
# flake8: noqa: C901
def search(
self,
query: str, # this is required
location: str = None,
        safe: SafeMode = SafeMode.OFF,  # defaults to SafeMode.OFF
num: int = 10,
engine: Engine = Engine.GOOGLE, # this is required
):
from serpapi import SerpApiClient
# required parameters. https://serpapi.com/search-api.
params = {
"q": query,
"location": location,
"api_key": self.connection.api_key,
}
if isinstance(engine, Engine):
params["engine"] = engine.value
else:
params["engine"] = engine
if safe == SafeMode.ACTIVE:
            # Ignore invalid values and safe="off" (the default)
            # Bing and Google use different parameters
if params["engine"].lower() == "google":
params["safe"] = "Active"
else:
params["safeSearch"] = "Strict"
if int(num) > 0:
            # we accept "num" for both engines and map it to each engine's own parameter below
if params["engine"].lower() == "google":
params["num"] = int(num)
else:
params["count"] = int(num)
search = SerpApiClient(params)
# get response
try:
response = search.get_response()
if response.status_code == requests.codes.ok:
# default output is json
return json.loads(response.text)
else:
# Step I: Try to get accurate error message at best
error_message = self.safe_extract_error_message(response)
# Step II: Construct PromptflowException
if response.status_code >= 500:
raise SerpAPISystemError(message=error_message)
else:
raise SerpAPIUserError(message=error_message)
except Exception as e:
            # SerpApi is generally robust; keep basic error handling as a fallback
if not isinstance(e, PromptflowException):
print(f"Unexpected exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
error_message = f"SerpAPI search request failed: {type(e).__name__}: {str(e)}"
raise SerpAPISystemError(message=error_message)
raise
def search(
connection: SerpConnection,
query: str, # this is required
location: str = None,
safe: SafeMode = SafeMode.OFF, # Not default to be SafeMode.OFF
num: int = 10,
engine: Engine = Engine.GOOGLE, # this is required
):
return SerpAPI(connection).search(
query=query,
location=location,
safe=safe,
num=num,
engine=engine,
) | null |
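A hypothetical invocation (illustrative only; requires a real SerpAPI key and network access):
# connection = SerpConnection(api_key="<your-serpapi-key>")  # placeholder key
# results = search(connection, query="prompt flow", num=5, engine=Engine.BING)
# results is the parsed JSON response; the engine-specific parameters (num vs count) are mapped above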
3,972 | from pathlib import Path
from ruamel.yaml import YAML
def collect_tools_from_directory(base_dir) -> dict:
tools = {}
yaml = YAML()
    for yaml_path in Path(base_dir).glob("**/*.yaml"):
        with open(yaml_path, "r") as fh:
            tools_in_file = yaml.load(fh)
for identifier, tool in tools_in_file.items():
tools[identifier] = tool
return tools
The provided code snippet includes necessary dependencies for implementing the `list_package_tools` function. Write a Python function `def list_package_tools()` to solve the following problem:
List package tools
Here is the function:
def list_package_tools():
"""List package tools"""
yaml_dir = Path(__file__).parent / "yamls"
return collect_tools_from_directory(yaml_dir) | List package tools |
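A runnable sketch (assumes ruamel.yaml is installed) showing the YAML layout collect_tools_from_directory expects:
import os
import tempfile

with tempfile.TemporaryDirectory() as tool_dir:
    with open(os.path.join(tool_dir, "echo.yaml"), "w") as sample:
        sample.write("my_pkg.tools.echo:\n  name: Echo\n  type: python\n")
    # prints a mapping like {'my_pkg.tools.echo': {'name': 'Echo', 'type': 'python'}}
    print(collect_tools_from_directory(tool_dir))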
3,973 | import sys
from jinja2 import Template
from promptflow._internal import tool
from promptflow.tools.common import render_jinja_template
from promptflow.tools.exception import JinjaTemplateError
def render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs):
try:
return Template(prompt, trim_blocks=trim_blocks, keep_trailing_newline=keep_trailing_newline).render(**kwargs)
except Exception as e:
# For exceptions raised by jinja2 module, mark UserError
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
error_message = f"Failed to render jinja template: {type(e).__name__}: {str(e)}. " \
+ "Please modify your prompt to fix the issue."
raise JinjaTemplateError(message=error_message) from e
def render_template_jinja2(template: str, **kwargs) -> str:
return render_jinja_template(template, trim_blocks=True, keep_trailing_newline=True, **kwargs) | null |
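Illustrative call (not part of the original snippet):
# render_template_jinja2("Hello {{ name }}!", name="world") -> "Hello world!"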
3,974 | from dataclasses import dataclass
from datetime import datetime
from itertools import chain
from typing import Any, List, Mapping
from promptflow._utils.exception_utils import ExceptionPresenter, RootErrorCode
from promptflow._utils.openai_metrics_calculator import OpenAIMetricsCalculator
from promptflow.contracts.run_info import RunInfo, Status
from promptflow.executor._result import AggregationResult, LineResult
def _get_node_run_infos(line_results: List[LineResult], aggr_result: AggregationResult):
line_node_run_infos = (
node_run_info for line_result in line_results for node_run_info in line_result.node_run_infos.values()
)
aggr_node_run_infos = (node_run_info for node_run_info in aggr_result.node_run_infos.values())
return chain(line_node_run_infos, aggr_node_run_infos) | null |
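A minimal sketch with stand-in objects (LineResult and AggregationResult mocked via SimpleNamespace):
from types import SimpleNamespace

line = SimpleNamespace(node_run_infos={"node1": "run-a"})
aggr = SimpleNamespace(node_run_infos={"agg_node": "run-b"})
print(list(_get_node_run_infos([line], aggr)))  # prints ['run-a', 'run-b']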