id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
2,560
def score(input):
    """Score a feature vector with two fixed depth-2 regression trees.

    Only ``input[5]`` and ``input[12]`` are read. The result is a base score
    of 0.5 plus the output of each tree (code appears machine-generated from
    a trained boosted-tree model — TODO confirm origin).
    """
    x12 = input[12]
    x5 = input[5]

    # First tree: split on feature 12, refine on 12 or 5.
    if x12 >= 9.725:
        tree0 = 3.5343752 if x12 >= 19.23 else 5.5722494
    else:
        tree0 = 11.1947155 if x5 >= 6.941 else 7.4582143

    # Second tree: same feature pair, different thresholds.
    if x12 >= 5.1549997:
        tree1 = 2.8350503 if x12 >= 15.0 else 4.8024607
    else:
        tree1 = 10.0011215 if x5 >= 7.406 else 6.787523

    return 0.5 + (tree0 + tree1)
null
2,561
def score(input):
    """Score a feature vector as the sum of two fixed depth-2 decision trees.

    Only ``input[5]`` and ``input[12]`` are consulted; all splits use strict
    ``>`` comparisons (code appears machine-generated from a trained
    tree-ensemble model — TODO confirm origin).
    """
    # Tree A: primary split on feature 12.
    if input[12] > 9.725000000000003:
        part_a = 21.71499740307178 if input[12] > 16.205000000000002 else 22.322292901846218
    else:
        part_a = 24.75760617150803 if input[5] > 7.418000000000001 else 23.02910423871904

    # Tree B: primary split on feature 5.
    if input[5] > 6.837500000000001:
        part_b = 2.0245964808123453 if input[5] > 7.462000000000001 else 0.859548540618913
    else:
        part_b = -0.7009440524656984 if input[12] > 14.365 else 0.052794864734003494

    return part_a + part_b
null
2,562
def score(input):
    """Evaluate a fixed 13-feature linear model.

    Returns the intercept plus the dot product of the hard-coded coefficient
    vector with ``input[0..12]``. Terms are accumulated left-to-right in
    index order, exactly as the original expanded expression did.
    """
    coefficients = (
        -0.10861311354908008,
        0.046461486329936456,
        0.027432259970172148,
        2.6160671309537777,
        -17.51793656329737,
        3.7674418196772255,
        -0.000021581753164971046,
        -1.4711768622633645,
        0.2956767140062958,
        -0.012233831527259383,
        -0.9220356453705304,
        0.009038220462695552,
        -0.542583033714222,
    )
    total = 36.367080746577244  # intercept
    # Indexing (rather than zip) preserves the original IndexError on short input.
    for i, weight in enumerate(coefficients):
        total += input[i] * weight
    return total
null
2,563
from fontTools.ttLib import TTFont, woff2
from afdko.otf2ttf import otf_to_ttf
from os import path, getcwd, makedirs, listdir, remove, walk
from subprocess import run
from zipfile import ZipFile, ZIP_DEFLATED
from urllib.request import urlopen
from ttfautohint import ttfautohint
from enum import Enum, unique
import shutil
import json
import hashlib
import platform

# Download and unpack the Nerd Fonts patcher on first run.
# NOTE(review): `root` is defined elsewhere in the original script — confirm.
if not path.exists(path.join(root, "FontPatcher")):
    url = "https://github.com/ryanoasis/nerd-fonts/releases/download/v3.0.2/FontPatcher.zip"
    print(f"Font Patcher does not exist, download from {url}")
    try:
        zip_path = path.join(root, "FontPatcher.zip")
        if not path.exists(zip_path):
            # Stream the release archive to disk.
            with urlopen(url) as response, open(zip_path, "wb") as out_file:
                shutil.copyfileobj(response, out_file)
        with ZipFile(zip_path, "r") as zip_ref:
            zip_ref.extractall(path.join(root, "FontPatcher"))
        remove(zip_path)
    except Exception as e:
        print(
            f"fail to download Font Patcher, please consider to download it manually, put downloaded 'FontPatcher.zip' in the 'source' folder and run this script again. Error: {e}"
        )
        exit(1)

# NOTE(review): `sc_path`, `make_sure_eol` and `family_name` come from the
# surrounding script (not visible in this snippet) — confirm they are in scope.
if path.exists(sc_path):
    make_sure_eol(sc_path)
    run([sc_path, family_name])


def mkdirs(dir):
    # Create the directory if it does not exist (makedirs with a guard).
    if not path.exists(dir):
        makedirs(dir)
null
2,564
from fontTools.ttLib import TTFont, woff2
from afdko.otf2ttf import otf_to_ttf
from os import path, getcwd, makedirs, listdir, remove, walk
from subprocess import run
from zipfile import ZipFile, ZIP_DEFLATED
from urllib.request import urlopen
from ttfautohint import ttfautohint
from enum import Enum, unique
import shutil
import json
import hashlib
import platform

# Output folder sits beside the current (source) directory.
# NOTE(review): `root` is defined elsewhere in the original script — confirm.
output_path = path.join(path.dirname(root), "output")

# Download and unpack the Nerd Fonts patcher on first run.
if not path.exists(path.join(root, "FontPatcher")):
    url = "https://github.com/ryanoasis/nerd-fonts/releases/download/v3.0.2/FontPatcher.zip"
    print(f"Font Patcher does not exist, download from {url}")
    try:
        zip_path = path.join(root, "FontPatcher.zip")
        if not path.exists(zip_path):
            # Stream the release archive to disk.
            with urlopen(url) as response, open(zip_path, "wb") as out_file:
                shutil.copyfileobj(response, out_file)
        with ZipFile(zip_path, "r") as zip_ref:
            zip_ref.extractall(path.join(root, "FontPatcher"))
        remove(zip_path)
    except Exception as e:
        print(
            f"fail to download Font Patcher, please consider to download it manually, put downloaded 'FontPatcher.zip' in the 'source' folder and run this script again. Error: {e}"
        )
        exit(1)

# NOTE(review): `sc_path`, `make_sure_eol` and `family_name` come from the
# surrounding script — confirm they are in scope.
if path.exists(sc_path):
    make_sure_eol(sc_path)
    run([sc_path, family_name])


def auto_hint(f: str, ttf_path: str):
    # Run ttfautohint over the TTF and write the hinted copy to
    # <output>/ttf-autohint/<f>.ttf.
    ttfautohint(
        in_file=ttf_path,
        out_file=path.join(output_path, "ttf-autohint", f + ".ttf"),
    )
null
2,565
from fontTools.ttLib import TTFont, woff2
from afdko.otf2ttf import otf_to_ttf
from os import path, getcwd, makedirs, listdir, remove, walk
from subprocess import run
from zipfile import ZipFile, ZIP_DEFLATED
from urllib.request import urlopen
from ttfautohint import ttfautohint
from enum import Enum, unique
import shutil
import json
import hashlib
import platform


class Status(Enum):
    # String values because they are later passed as CLI arguments (.value).
    DISABLE = "0"
    ENABLE = "1"
    IGNORE = "2"


build_nerd_font = True

build_config = {
    # font family name
    "family_name": "Maple Mono",
    # whether to enable font features by default
    "freeze_feature_list": {
        # ======
        # ligatures:
        # Status.IGNORE: do nothing
        # Status.ENABLE: move font features to default ligature
        # Status.DISABLE: remove font features
        "ss01": Status.IGNORE,  # == === != !==
        "ss02": Status.IGNORE,  # [info] [trace] [debug] [warn] [error] [fatal] [vite]
        "ss03": Status.IGNORE,  # __
        "ss04": Status.IGNORE,  # >= <=
        "ss05": Status.IGNORE,  # {{ }}
        # ======
        # character variant:
        # Status.IGNORE: do nothing
        # Status.ENABLE: enable character variants by default
        # Status.DISABLE: remove character variants
        "cv01": Status.IGNORE,  # @ # $ % & Q -> =>
        "cv02": Status.IGNORE,  # alt i
        "cv03": Status.IGNORE,  # alt a
        "cv04": Status.IGNORE,  # alt @
        "zero": Status.IGNORE,  # alt 0
        # ======
    },
    # config for nerd font
    # total config: generate-nerdfont.{bat/sh}:17
    "nerd_font": {
        "mono": Status.ENABLE,  # whether to use half width icon
        "use_hinted": Status.ENABLE,  # whether to use hinted ttf to generate Nerd Font patch
    },
}

root = getcwd()
ttx_path = path.join(root, "ttx")
output_path = path.join(path.dirname(root), "output")
family_name = build_config["family_name"]
family_name_trim = family_name.replace(" ", "")

# Download and unpack the Nerd Fonts patcher on first run.
if not path.exists(path.join(root, "FontPatcher")):
    url = "https://github.com/ryanoasis/nerd-fonts/releases/download/v3.0.2/FontPatcher.zip"
    print(f"Font Patcher does not exist, download from {url}")
    try:
        zip_path = path.join(root, "FontPatcher.zip")
        if not path.exists(zip_path):
            with urlopen(url) as response, open(zip_path, "wb") as out_file:
                shutil.copyfileobj(response, out_file)
        with ZipFile(zip_path, "r") as zip_ref:
            zip_ref.extractall(path.join(root, "FontPatcher"))
        remove(zip_path)
    except Exception as e:
        print(
            f"fail to download Font Patcher, please consider to download it manually, put downloaded 'FontPatcher.zip' in the 'source' folder and run this script again. Error: {e}"
        )
        exit(1)


def make_sure_eol(file_path: str):
    # Normalize line endings of .bat scripts to CRLF before executing them.
    # NOTE(review): str.replace does not interpret regex, so the
    # r"(?<!\r)\n" pattern is matched literally — confirm intended behavior.
    if path.exists(file_path) and file_path.endswith("bat"):
        with open(file_path, "r+", encoding="utf-8") as f:
            content = f.read()
            f.seek(0)
            f.write(content.replace(r"(?<!\r)\n", "\r\n"))
            f.truncate()


# Main build loop: one TTX dump per weight/style under ./ttx.
for f in listdir(ttx_path):
    # load font
    font = TTFont()
    font.importXML(fileOrPath=path.join(root, "ttx", f, f + ".ttx"))
    # check feature list
    feature_record = font["GSUB"].table.FeatureList.FeatureRecord
    feature_dict = {feature.FeatureTag: feature.Feature for feature in feature_record}
    calt_lookup_list = feature_dict.get("calt").LookupListIndex
    for key, feat in feature_dict.items():
        if key == "calt":
            continue
        status = build_config["freeze_feature_list"][key]
        if status == Status.IGNORE:
            continue
        if status == Status.DISABLE:
            # clear lookup list
            feat.LookupListIndex = []
        elif key.startswith("ss"):
            # to freeze styleset, target lookup list should be push into calt's lookup list
            calt_lookup_list.extend(feat.LookupListIndex)
        else:
            # to freeze character variants, apply the replacement of pair that
            # defined in lookup list in cff table and hmtx table
            # NOTE(review): `replace_glyph` is defined elsewhere in the script — confirm.
            for index in feat.LookupListIndex:
                dict = font["GSUB"].table.LookupList.Lookup[index].SubTable[0].mapping
                for k, v in dict.items():
                    replace_glyph(k, v)
    # correct names
    _, sub = f.split("-")
    current_family = f"{family_name_trim}-{sub}"

    # correct names
    def set_name(name: str, id: int):
        font["name"].setName(name, nameID=id, platformID=3, platEncID=1, langID=0x409)

    def get_name(id: int):
        # NOTE(review): result is not returned, so get_name(5) below yields
        # None in the version string — confirm whether a `return` is missing.
        font["name"].getName(nameID=id, platformID=3, platEncID=1)

    set_name(family_name, 1)
    set_name(sub, 2)
    set_name(f"{family_name} {sub}; {get_name(5)}", 3)
    set_name(f"{family_name} {sub}", 4)
    set_name(current_family, 6)
    otf_path = path.join(output_path, "otf", f"{current_family}.otf")
    ttf_path = path.join(output_path, "ttf", f"{current_family}.ttf")
    # save otf font
    font.save(otf_path)
    # save ttf font
    otf_to_ttf(font)
    font.save(ttf_path)
    # auto hint
    # NOTE(review): `auto_hint` is defined elsewhere in the script — confirm.
    auto_hint(current_family, ttf_path)
    font.close()
    # generate nerd font
    generate_nerd_font(current_family, f)
    # generate woff2
    woff2.compress(otf_path, path.join(output_path, "woff2", f"{current_family}.woff2"))
    print("generated:", current_family)

# NOTE(review): `sc_path` comes from elsewhere in the script — confirm.
if path.exists(sc_path):
    make_sure_eol(sc_path)
    run([sc_path, family_name])


def generate_nerd_font(f: str, f_ttx: str):
    # Patch the built TTF with Nerd Font glyphs via the platform-specific
    # generate-nerdfont script, then fix up its name table and OS/2 metrics.
    if not build_nerd_font:
        return
    system = platform.uname()[0]
    script = path.join(
        root,
        f"generate-nerdfont{'-mac' if 'Darwin' in system else ''}.{'bat' if 'Windows' in system else 'sh'}",
    )
    make_sure_eol(script)
    run(
        [
            script,
            f,
            build_config["nerd_font"]["mono"].value,
            build_config["nerd_font"]["use_hinted"].value,
        ]
    )
    _, sub = f.split("-")
    mono = "Mono" if build_config["nerd_font"]["mono"] == Status.ENABLE else ""
    nf_path = path.join(
        output_path,
        "NF",
        f"{family_name_trim}NerdFont{mono}-{sub}.ttf",
    )
    # load font
    nf_font = TTFont(nf_path)

    def set_name(name: str, id: int):
        # Set the record for both Windows (3/1) and Macintosh (1/0) platforms.
        nf_font["name"].setName(
            name, nameID=id, platformID=3, platEncID=1, langID=0x409
        )
        nf_font["name"].setName(name, nameID=id, platformID=1, platEncID=0, langID=0x0)

    def get_name(id: int):
        return nf_font["name"].getName(nameID=id, platformID=3, platEncID=1)

    def del_name(id: int):
        nf_font["name"].removeNames(nameID=id)

    # correct names
    set_name(f"{family_name} NF", 1)
    set_name(sub, 2)
    set_name(f"{family_name} NF {sub}; {get_name(5)}", 3)
    set_name(f"{family_name} NF {sub}", 4)
    set_name(f"{family_name_trim}NF-{sub}", 6)
    # remove additional names
    del_name(16)
    del_name(17)
    del_name(18)
    del_name(20)
    # Restore the original OS/2 table from the source TTX dump.
    nf_font.importXML(path.join(ttx_path, f_ttx, f_ttx + ".O_S_2f_2.ttx"))
    # save font
    nf_font.save(path.join(output_path, "NF", f"{family_name_trim}-NF-{sub}.ttf"))
    nf_font.close()
    # remove original font
    remove(nf_path)
null
2,566
from fontTools.ttLib import TTFont, woff2
from afdko.otf2ttf import otf_to_ttf
from os import path, getcwd, makedirs, listdir, remove, walk
from subprocess import run
from zipfile import ZipFile, ZIP_DEFLATED
from urllib.request import urlopen
from ttfautohint import ttfautohint
from enum import Enum, unique
import shutil
import json
import hashlib
import platform

print("=== [build start] ===")
# NOTE(review): `conf` is defined elsewhere in the original script — confirm.
print(conf)


def replace_glyph(old_key: str, new_key: str):
    """Overwrite glyph `old_key` with glyph `new_key` in both the CFF charstrings
    and the hmtx metrics, so `old_key` renders/advances exactly like `new_key`.

    NOTE(review): relies on a module-level `font` (TTFont) defined elsewhere
    in the script — confirm it is in scope when this runs.
    """
    cff_dict = font["CFF "].cff.values()[0].CharStrings.charStrings
    hmtx_dict = font["hmtx"].metrics
    # Both glyphs must exist in both tables, otherwise skip with a warning.
    if not (
        old_key in cff_dict
        and old_key in hmtx_dict
        and new_key in cff_dict
        and new_key in hmtx_dict
    ):
        print(f"{old_key} or {new_key} does not exist")
        return
    else:
        cff_dict[old_key] = cff_dict[new_key]
        hmtx_dict[old_key] = hmtx_dict[new_key]
null
2,567
from fontTools.ttLib import TTFont, woff2
from afdko.otf2ttf import otf_to_ttf
from os import path, getcwd, makedirs, listdir, remove, walk
from subprocess import run
from zipfile import ZipFile, ZIP_DEFLATED
from urllib.request import urlopen
from ttfautohint import ttfautohint
from enum import Enum, unique
import shutil
import json
import hashlib
import platform

root = getcwd()
# NOTE(review): `family_name` is defined elsewhere in the original script — confirm.
family_name_trim = family_name.replace(" ", "")

# Download and unpack the Nerd Fonts patcher on first run.
if not path.exists(path.join(root, "FontPatcher")):
    url = "https://github.com/ryanoasis/nerd-fonts/releases/download/v3.0.2/FontPatcher.zip"
    print(f"Font Patcher does not exist, download from {url}")
    try:
        zip_path = path.join(root, "FontPatcher.zip")
        if not path.exists(zip_path):
            with urlopen(url) as response, open(zip_path, "wb") as out_file:
                shutil.copyfileobj(response, out_file)
        with ZipFile(zip_path, "r") as zip_ref:
            zip_ref.extractall(path.join(root, "FontPatcher"))
        remove(zip_path)
    except Exception as e:
        print(
            f"fail to download Font Patcher, please consider to download it manually, put downloaded 'FontPatcher.zip' in the 'source' folder and run this script again. Error: {e}"
        )
        exit(1)

# NOTE(review): `sc_path`, `make_sure_eol` come from the surrounding script — confirm.
if path.exists(sc_path):
    make_sure_eol(sc_path)
    run([sc_path, family_name])

# Persist the build configuration alongside the generated fonts.
# NOTE(review): `output_path` and `conf` are defined elsewhere — confirm.
with open(path.join(output_path, "build-config.json"), "w") as config_file:
    config_file.write(conf)


def compress_folder(source_folder_path, target_path):
    # Zip `source_folder_path` into target_path/<family>-<folder>.zip
    # (paths stored relative to the folder) and return the archive's
    # SHA-1 hex digest.
    source_folder_name = path.basename(source_folder_path)
    zip_path = path.join(target_path, f"{family_name_trim}-{source_folder_name}.zip")
    with ZipFile(zip_path, "w", compression=ZIP_DEFLATED, compresslevel=5) as zip_file:
        # NOTE(review): loop variable `root` shadows the module-level `root`.
        for root, dirs, files in walk(source_folder_path):
            for file in files:
                file_path = path.join(root, file)
                zip_file.write(file_path, path.relpath(file_path, source_folder_path))
        # NOTE(review): redundant — the `with` block closes the archive; kept as-is.
        zip_file.close()
    # Hash the finished archive in 1 KiB chunks.
    sha1 = hashlib.sha1()
    with open(zip_path, "rb") as zip_file:
        while True:
            data = zip_file.read(1024)
            if not data:
                break
            sha1.update(data)
    return sha1.hexdigest()
null
2,568
import argparse, os, sys, datetime, glob, importlib, csv
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import random_split, DataLoader, Dataset, Subset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config, instantiate_from_config_sr
from pytorch_lightning.loggers import WandbLogger


def get_parser(**parser_kwargs):
    """Build the command-line argument parser for the training entry point.

    `parser_kwargs` are forwarded verbatim to `argparse.ArgumentParser`.
    Returns the configured parser (Trainer-specific args are added elsewhere).
    """

    def str2bool(v):
        # Accept a real bool or the common textual spellings of true/false.
        if isinstance(v, bool):
            return v
        if v.lower() in ("yes", "true", "t", "y", "1"):
            return True
        elif v.lower() in ("no", "false", "f", "n", "0"):
            return False
        else:
            raise argparse.ArgumentTypeError("Boolean value expected.")

    parser = argparse.ArgumentParser(**parser_kwargs)
    parser.add_argument(
        "-n",
        "--name",
        type=str,
        const=True,
        default="",
        nargs="?",
        help="postfix for logdir",
    )
    parser.add_argument(
        "-r",
        "--resume",
        type=str,
        const=True,
        default="",
        nargs="?",
        help="resume from logdir or checkpoint in logdir",
    )
    parser.add_argument(
        "-b",
        "--base",
        nargs="*",
        metavar="base_config.yaml",
        help="paths to base configs. Loaded from left-to-right. "
        "Parameters can be overwritten or added with command-line options of the form `--key value`.",
        default=list(),
    )
    parser.add_argument(
        "-t",
        "--train",
        type=str2bool,
        const=True,
        default=False,
        nargs="?",
        help="train",
    )
    parser.add_argument(
        "--no-test",
        type=str2bool,
        const=True,
        default=False,
        nargs="?",
        help="disable test",
    )
    parser.add_argument(
        "-p", "--project", help="name of new or path to existing project"
    )
    parser.add_argument(
        "-d",
        "--debug",
        type=str2bool,
        nargs="?",
        const=True,
        default=False,
        help="enable post-mortem debugging",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=23,
        help="seed for seed_everything",
    )
    parser.add_argument(
        "-f",
        "--postfix",
        type=str,
        default="",
        help="post-postfix for default name",
    )
    parser.add_argument(
        "-l",
        "--logdir",
        type=str,
        default="./logs",
        help="directory for logging dat shit",
    )
    parser.add_argument(
        "--scale_lr",
        type=str2bool,
        nargs="?",
        const=True,
        default=False,
        help="scale base-lr by ngpu * batch_size * n_accumulate",
    )
    return parser
null
2,569
import argparse, os, sys, datetime, glob, importlib, csv
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import random_split, DataLoader, Dataset, Subset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config, instantiate_from_config_sr
from pytorch_lightning.loggers import WandbLogger


def nondefault_trainer_args(opt):
    """Return the sorted names of Trainer CLI arguments whose value in `opt`
    differs from the pytorch-lightning defaults (i.e. the user set them)."""
    parser = argparse.ArgumentParser()
    parser = Trainer.add_argparse_args(parser)
    # Parse an empty argv to obtain the pure defaults for comparison.
    args = parser.parse_args([])
    return sorted(k for k in vars(args) if getattr(opt, k) != getattr(args, k))
null
2,570
import argparse, os, sys, datetime, glob, importlib, csv
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
# FIX: IterableDataset is used as a base class below but was never imported,
# which raises NameError at class-definition time.
from torch.utils.data import random_split, DataLoader, Dataset, IterableDataset, Subset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config, instantiate_from_config_sr
from pytorch_lightning.loggers import WandbLogger


# NOTE(review): this class shadows the Txt2ImgIterableBaseDataset imported
# from ldm.data.base above — confirm the redefinition is intentional.
class Txt2ImgIterableBaseDataset(IterableDataset):
    '''
    Define an interface to make the IterableDatasets for text2img data chainable
    '''
    def __init__(self, num_records=0, valid_ids=None, size=256):
        super().__init__()
        self.num_records = num_records  # total record count, reported by __len__
        self.valid_ids = valid_ids      # full id list; workers receive a slice of it
        self.sample_ids = valid_ids     # ids this instance actually iterates
        self.size = size                # image size used by subclasses
        print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.')

    def __len__(self):
        return self.num_records

    def __iter__(self):
        # Interface only: concrete subclasses implement the iteration.
        pass


def worker_init_fn(_):
    """DataLoader worker init hook: shard iterable datasets across workers and
    de-correlate each worker's numpy RNG seed."""
    worker_info = torch.utils.data.get_worker_info()

    dataset = worker_info.dataset
    worker_id = worker_info.id

    if isinstance(dataset, Txt2ImgIterableBaseDataset):
        # Give each worker a contiguous, non-overlapping slice of the ids.
        split_size = dataset.num_records // worker_info.num_workers
        # reset num_records to the true number to retain reliable length information
        dataset.sample_ids = dataset.valid_ids[worker_id * split_size:(worker_id + 1) * split_size]
        current_id = np.random.choice(len(np.random.get_state()[1]), 1)
        return np.random.seed(np.random.get_state()[1][current_id] + worker_id)
    else:
        return np.random.seed(np.random.get_state()[1][0] + worker_id)
null
2,571
import argparse, os, sys, datetime, glob, importlib, csv
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import random_split, DataLoader, Dataset, Subset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config, instantiate_from_config_sr
from pytorch_lightning.loggers import WandbLogger


def melk(*args, **kwargs):
    """Emergency checkpoint handler: save 'last.ckpt' from the rank-0 process.

    NOTE(review): reads `trainer` and `ckptdir` from an enclosing scope (in
    the original script this is a closure inside main) — confirm both are in
    scope where this is called.
    """
    # run all checkpoint hooks
    if trainer.global_rank == 0:
        print("Summoning checkpoint.")
        ckpt_path = os.path.join(ckptdir, "last.ckpt")
        trainer.save_checkpoint(ckpt_path)
null
2,572
import argparse, os, sys, datetime, glob, importlib, csv
import numpy as np
import time
import torch
import torchvision
import pytorch_lightning as pl
from packaging import version
from omegaconf import OmegaConf
from torch.utils.data import random_split, DataLoader, Dataset, Subset
from functools import partial
from PIL import Image
from pytorch_lightning import seed_everything
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, Callback, LearningRateMonitor
from pytorch_lightning.utilities.distributed import rank_zero_only
from pytorch_lightning.utilities import rank_zero_info
from ldm.data.base import Txt2ImgIterableBaseDataset
from ldm.util import instantiate_from_config, instantiate_from_config_sr
from pytorch_lightning.loggers import WandbLogger


def divein(*args, **kwargs):
    """Debug handler: drop the rank-0 process into the pudb debugger.

    NOTE(review): reads `trainer` from an enclosing scope (a closure inside
    main in the original script) — confirm it is in scope. `pudb` is a
    third-party debugger imported lazily on first use.
    """
    if trainer.global_rank == 0:
        import pudb
        pudb.set_trace()
null
2,573
import sys
import os
import cv2
import torch
import torch.nn.functional as F
import gradio as gr
import torchvision
from torchvision.transforms.functional import normalize
from ldm.util import instantiate_from_config
from torch import autocast
import PIL
import numpy as np
from pytorch_lightning import seed_everything
from contextlib import nullcontext
from omegaconf import OmegaConf
from PIL import Image
import copy
from scripts.wavelet_color_fix import wavelet_reconstruction, adaptive_instance_normalization
from scripts.util_image import ImageSpliterTh
from basicsr.utils.download_util import load_file_from_url
from einops import rearrange, repeat
from pathlib import Path


def chunk(it, size):
    """Split iterable `it` into successive tuples of length `size`.

    The final tuple may be shorter than `size`; an exhausted iterable yields
    nothing. Returns a lazy iterator of tuples.
    """
    # FIX: `islice` was used without being imported anywhere in this module,
    # raising NameError on first call.
    from itertools import islice
    it = iter(it)
    # iter(callable, sentinel) stops once a chunk comes back empty.
    return iter(lambda: tuple(islice(it, size)), ())
null
2,574
import sys import os import cv2 import torch import torch.nn.functional as F import gradio as gr import torchvision from torchvision.transforms.functional import normalize from ldm.util import instantiate_from_config from torch import autocast import PIL import numpy as np from pytorch_lightning import seed_everything from contextlib import nullcontext from omegaconf import OmegaConf from PIL import Image import copy from scripts.wavelet_color_fix import wavelet_reconstruction, adaptive_instance_normalization from scripts.util_image import ImageSpliterTh from basicsr.utils.download_util import load_file_from_url from einops import rearrange, repeat from pathlib import Path torch.hub.download_url_to_file( 'https://raw.githubusercontent.com/zsyOAOA/ResShift/master/testdata/RealSet128/Lincoln.png', '01.png') torch.hub.download_url_to_file( 'https://raw.githubusercontent.com/zsyOAOA/ResShift/master/testdata/RealSet128/oldphoto6.png', '02.png') torch.hub.download_url_to_file( 'https://raw.githubusercontent.com/zsyOAOA/ResShift/master/testdata/RealSet128/comic2.png', '03.png') torch.hub.download_url_to_file( 'https://raw.githubusercontent.com/zsyOAOA/ResShift/master/testdata/RealSet128/OST_120.png', '04.png') torch.hub.download_url_to_file( 'https://raw.githubusercontent.com/zsyOAOA/ResShift/master/testdata/RealSet65/comic3.png', '05.png') def load_img(path): image = Image.open(path).convert("RGB") w, h = image.size w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32 image = image.resize((w, h), resample=PIL.Image.LANCZOS) image = np.array(image).astype(np.float32) / 255.0 image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image) return 2.*image - 1. def space_timesteps(num_timesteps, section_counts): """ Create a list of timesteps to use from an original diffusion process, given the number of timesteps we want to take from equally-sized portions of the original process. 
For example, if there's 300 timesteps and the section counts are [10,15,20] then the first 100 timesteps are strided to be 10 timesteps, the second 100 are strided to be 15 timesteps, and the final 100 are strided to be 20. If the stride is a string starting with "ddim", then the fixed striding from the DDIM paper is used, and only one section is allowed. :param num_timesteps: the number of diffusion steps in the original process to divide up. :param section_counts: either a list of numbers, or a string containing comma-separated numbers, indicating the step count per section. As a special case, use "ddimN" where N is a number of steps to use the striding from the DDIM paper. :return: a set of diffusion steps from the original process to use. """ if isinstance(section_counts, str): if section_counts.startswith("ddim"): desired_count = int(section_counts[len("ddim"):]) for i in range(1, num_timesteps): if len(range(0, num_timesteps, i)) == desired_count: return set(range(0, num_timesteps, i)) raise ValueError( f"cannot create exactly {num_timesteps} steps with an integer stride" ) section_counts = [int(x) for x in section_counts.split(",")] #[250,] size_per = num_timesteps // len(section_counts) extra = num_timesteps % len(section_counts) start_idx = 0 all_steps = [] for i, section_count in enumerate(section_counts): size = size_per + (1 if i < extra else 0) if size < section_count: raise ValueError( f"cannot divide section of {size} steps into {section_count}" ) if section_count <= 1: frac_stride = 1 else: frac_stride = (size - 1) / (section_count - 1) cur_idx = 0.0 taken_steps = [] for _ in range(section_count): taken_steps.append(start_idx + round(cur_idx)) cur_idx += frac_stride all_steps += taken_steps start_idx += size return set(all_steps) def load_model_from_config(config, ckpt, verbose=False): print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = 
pl_sd["state_dict"] model = instantiate_from_config(config.model) m, u = model.load_state_dict(sd, strict=False) if len(m) > 0 and verbose: print("missing keys:") print(m) if len(u) > 0 and verbose: print("unexpected keys:") print(u) model.cuda() model.eval() return model device = torch.device("cuda") vq_model = load_model_from_config(vqgan_config, './weights/vqgan_cfw_00011.ckpt') vq_model = vq_model.to(device) def adaptive_instance_normalization(content_feat:Tensor, style_feat:Tensor): """Adaptive instance normalization. Adjust the reference features to have the similar color and illuminations as those in the degradate features. Args: content_feat (Tensor): The reference feature. style_feat (Tensor): The degradate features. """ size = content_feat.size() style_mean, style_std = calc_mean_std(style_feat) content_mean, content_std = calc_mean_std(content_feat) normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size) return normalized_feat * style_std.expand(size) + style_mean.expand(size) def wavelet_reconstruction(content_feat:Tensor, style_feat:Tensor): """ Apply wavelet decomposition, so that the content will have the same color as the style. 
""" # calculate the wavelet decomposition of the content feature content_high_freq, content_low_freq = wavelet_decomposition(content_feat) del content_low_freq # calculate the wavelet decomposition of the style feature style_high_freq, style_low_freq = wavelet_decomposition(style_feat) del style_high_freq # reconstruct the content feature with the style's high frequency return content_high_freq + style_low_freq class ImageSpliterTh: def __init__(self, im, pch_size, stride, sf=1): ''' Input: im: n x c x h x w, torch tensor, float, low-resolution image in SR pch_size, stride: patch setting sf: scale factor in image super-resolution ''' assert stride <= pch_size self.stride = stride self.pch_size = pch_size self.sf = sf bs, chn, height, width= im.shape self.height_starts_list = self.extract_starts(height) self.width_starts_list = self.extract_starts(width) self.length = self.__len__() self.num_pchs = 0 self.im_ori = im self.im_res = torch.zeros([bs, chn, height*sf, width*sf], dtype=im.dtype, device=im.device) self.pixel_count = torch.zeros([bs, chn, height*sf, width*sf], dtype=im.dtype, device=im.device) def extract_starts(self, length): if length <= self.pch_size: starts = [0,] else: starts = list(range(0, length, self.stride)) for i in range(len(starts)): if starts[i] + self.pch_size > length: starts[i] = length - self.pch_size starts = sorted(set(starts), key=starts.index) return starts def __len__(self): return len(self.height_starts_list) * len(self.width_starts_list) def __iter__(self): return self def __next__(self): if self.num_pchs < self.length: w_start_idx = self.num_pchs // len(self.height_starts_list) w_start = self.width_starts_list[w_start_idx] w_end = w_start + self.pch_size h_start_idx = self.num_pchs % len(self.height_starts_list) h_start = self.height_starts_list[h_start_idx] h_end = h_start + self.pch_size pch = self.im_ori[:, :, h_start:h_end, w_start:w_end,] h_start *= self.sf h_end *= self.sf w_start *= self.sf w_end *= self.sf self.w_start, 
self.w_end = w_start, w_end self.h_start, self.h_end = h_start, h_end self.num_pchs += 1 else: raise StopIteration() return pch, (h_start, h_end, w_start, w_end) def update(self, pch_res, index_infos): ''' Input: pch_res: n x c x pch_size x pch_size, float index_infos: (h_start, h_end, w_start, w_end) ''' if index_infos is None: w_start, w_end = self.w_start, self.w_end h_start, h_end = self.h_start, self.h_end else: h_start, h_end, w_start, w_end = index_infos self.im_res[:, :, h_start:h_end, w_start:w_end] += pch_res self.pixel_count[:, :, h_start:h_end, w_start:w_end] += 1 def gather(self): assert torch.all(self.pixel_count != 0) return self.im_res.div(self.pixel_count) The provided code snippet includes necessary dependencies for implementing the `inference` function. Write a Python function `def inference(image, upscale, dec_w, seed, model_type, ddpm_steps, colorfix_type)` to solve the following problem: Run a single prediction on the model Here is the function: def inference(image, upscale, dec_w, seed, model_type, ddpm_steps, colorfix_type): """Run a single prediction on the model""" precision_scope = autocast vq_model.decoder.fusion_w = dec_w seed_everything(seed) if model_type == '512': config = OmegaConf.load("./configs/stableSRNew/v2-finetune_text_T_512.yaml") model = load_model_from_config(config, "./weights/stablesr_000117.ckpt") min_size = 512 else: config = OmegaConf.load("./configs/stableSRNew/v2-finetune_text_T_768v.yaml") model = load_model_from_config(config, "./weights/stablesr_768v_000139.ckpt") min_size = 768 model = model.to(device) model.configs = config model.register_schedule(given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=0.00085, linear_end=0.0120, cosine_s=8e-3) model.num_timesteps = 1000 sqrt_alphas_cumprod = copy.deepcopy(model.sqrt_alphas_cumprod) sqrt_one_minus_alphas_cumprod = copy.deepcopy(model.sqrt_one_minus_alphas_cumprod) use_timesteps = set(space_timesteps(1000, [ddpm_steps])) last_alpha_cumprod = 1.0 
new_betas = [] timestep_map = [] for i, alpha_cumprod in enumerate(model.alphas_cumprod): if i in use_timesteps: new_betas.append(1 - alpha_cumprod / last_alpha_cumprod) last_alpha_cumprod = alpha_cumprod timestep_map.append(i) new_betas = [beta.data.cpu().numpy() for beta in new_betas] model.register_schedule(given_betas=np.array(new_betas), timesteps=len(new_betas)) model.num_timesteps = 1000 model.ori_timesteps = list(use_timesteps) model.ori_timesteps.sort() model = model.to(device) try: # global try with torch.no_grad(): with precision_scope("cuda"): with model.ema_scope(): init_image = load_img(image) init_image = F.interpolate( init_image, size=(int(init_image.size(-2)*upscale), int(init_image.size(-1)*upscale)), mode='bicubic', ) if init_image.size(-1) < min_size or init_image.size(-2) < min_size: ori_size = init_image.size() rescale = min_size * 1.0 / min(init_image.size(-2), init_image.size(-1)) new_h = max(int(ori_size[-2]*rescale), min_size) new_w = max(int(ori_size[-1]*rescale), min_size) init_template = F.interpolate( init_image, size=(new_h, new_w), mode='bicubic', ) else: init_template = init_image rescale = 1 init_template = init_template.clamp(-1, 1) assert init_template.size(-1) >= min_size assert init_template.size(-2) >= min_size init_template = init_template.type(torch.float16).to(device) if init_template.size(-1) <= 1024 and init_template.size(-2) <= 1024: init_latent_generator, enc_fea_lq = vq_model.encode(init_template) init_latent = model.get_first_stage_encoding(init_latent_generator) text_init = ['']*init_template.size(0) semantic_c = model.cond_stage_model(text_init) noise = torch.randn_like(init_latent) t = repeat(torch.tensor([999]), '1 -> b', b=init_image.size(0)) t = t.to(device).long() x_T = model.q_sample_respace(x_start=init_latent, t=t, sqrt_alphas_cumprod=sqrt_alphas_cumprod, sqrt_one_minus_alphas_cumprod=sqrt_one_minus_alphas_cumprod, noise=noise) if init_template.size(-1)<= min_size and init_template.size(-2) <= min_size: 
samples, _ = model.sample(cond=semantic_c, struct_cond=init_latent, batch_size=init_template.size(0), timesteps=ddpm_steps, time_replace=ddpm_steps, x_T=x_T, return_intermediates=True) else: samples, _ = model.sample_canvas(cond=semantic_c, struct_cond=init_latent, batch_size=init_template.size(0), timesteps=ddpm_steps, time_replace=ddpm_steps, x_T=x_T, return_intermediates=True, tile_size=int(min_size/8), tile_overlap=min_size//16, batch_size_sample=init_template.size(0)) x_samples = vq_model.decode(samples * 1. / model.scale_factor, enc_fea_lq) if colorfix_type == 'adain': x_samples = adaptive_instance_normalization(x_samples, init_template) elif colorfix_type == 'wavelet': x_samples = wavelet_reconstruction(x_samples, init_template) x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0) else: im_spliter = ImageSpliterTh(init_template, min(init_template.size(-1), init_template.size(-2), 1024), min(init_template.size(-1)-200, init_template.size(-2)-200, 768), sf=1) for im_lq_pch, index_infos in im_spliter: init_latent = model.get_first_stage_encoding(model.encode_first_stage(im_lq_pch)) # move to latent space text_init = ['']*init_latent.size(0) semantic_c = model.cond_stage_model(text_init) noise = torch.randn_like(init_latent) # If you would like to start from the intermediate steps, you can add noise to LR to the specific steps. 
t = repeat(torch.tensor([999]), '1 -> b', b=init_template.size(0)) t = t.to(device).long() x_T = model.q_sample_respace(x_start=init_latent, t=t, sqrt_alphas_cumprod=sqrt_alphas_cumprod, sqrt_one_minus_alphas_cumprod=sqrt_one_minus_alphas_cumprod, noise=noise) # x_T = noise samples, _ = model.sample_canvas(cond=semantic_c, struct_cond=init_latent, batch_size=im_lq_pch.size(0), timesteps=ddpm_steps, time_replace=ddpm_steps, x_T=x_T, return_intermediates=True, tile_size=int(min_size/8), tile_overlap=min_size//16, batch_size_sample=im_lq_pch.size(0)) _, enc_fea_lq = vq_model.encode(im_lq_pch) x_samples = vq_model.decode(samples * 1. / model.scale_factor, enc_fea_lq) if colorfix_type == 'adain': x_samples = adaptive_instance_normalization(x_samples, im_lq_pch) elif colorfix_type == 'wavelet': x_samples = wavelet_reconstruction(x_samples, im_lq_pch) im_spliter.update(x_samples, index_infos) x_samples = im_spliter.gather() x_samples = torch.clamp((x_samples+1.0)/2.0, min=0.0, max=1.0) if rescale > 1: x_samples = F.interpolate( x_samples, size=(int(init_image.size(-2)), int(init_image.size(-1))), mode='bicubic', ) x_samples = x_samples.clamp(0, 1) x_sample = 255. * rearrange(x_samples[0].cpu().numpy(), 'c h w -> h w c') restored_img = x_sample.astype(np.uint8) Image.fromarray(x_sample.astype(np.uint8)).save(f'output/out.png') return restored_img, f'output/out.png' except Exception as error: print('Global exception', error) return None, None
Run a single prediction on the model
2,575
import datetime import logging import math import time import torch from os import path as osp from basicsr.data import build_dataloader, build_dataset from basicsr.data.data_sampler import EnlargedSampler from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher from basicsr.models import build_model from basicsr.utils import (AvgTimer, MessageLogger, check_resume, get_env_info, get_root_logger, get_time_str, init_tb_logger, init_wandb_logger, make_exp_dirs, mkdir_and_rename, scandir) from basicsr.utils.options import copy_opt_file, dict2str, parse_options def init_tb_loggers(opt): def create_train_val_dataloader(opt, logger): def load_resume_state(opt): class CPUPrefetcher(): def __init__(self, loader): def next(self): def reset(self): class CUDAPrefetcher(): def __init__(self, loader, opt): def preload(self): def next(self): def reset(self): def build_model(opt): def dict2str(opt, indent_level=1): def parse_options(root_path, is_train=True): def copy_opt_file(opt_file, experiments_root): def train_pipeline(root_path): # parse options, set distributed setting, set random seed opt, args = parse_options(root_path, is_train=True) opt['root_path'] = root_path torch.backends.cudnn.benchmark = True # torch.backends.cudnn.deterministic = True # load resume states if necessary resume_state = load_resume_state(opt) # mkdir for experiments and logger if resume_state is None: make_exp_dirs(opt) if opt['logger'].get('use_tb_logger') and 'debug' not in opt['name'] and opt['rank'] == 0: mkdir_and_rename(osp.join(opt['root_path'], 'tb_logger', opt['name'])) # copy the yml file to the experiment root copy_opt_file(args.opt, opt['path']['experiments_root']) # WARNING: should not use get_root_logger in the above codes, including the called functions # Otherwise the logger will not be properly initialized log_file = osp.join(opt['path']['log'], f"train_{opt['name']}_{get_time_str()}.log") logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, 
log_file=log_file) logger.info(get_env_info()) logger.info(dict2str(opt)) # initialize wandb and tb loggers tb_logger = init_tb_loggers(opt) # create train and validation dataloaders result = create_train_val_dataloader(opt, logger) train_loader, train_sampler, val_loaders, total_epochs, total_iters = result # create model model = build_model(opt) if resume_state: # resume training model.resume_training(resume_state) # handle optimizers and schedulers logger.info(f"Resuming training from epoch: {resume_state['epoch']}, iter: {resume_state['iter']}.") start_epoch = resume_state['epoch'] current_iter = resume_state['iter'] else: start_epoch = 0 current_iter = 0 # create message logger (formatted outputs) msg_logger = MessageLogger(opt, current_iter, tb_logger) # dataloader prefetcher prefetch_mode = opt['datasets']['train'].get('prefetch_mode') if prefetch_mode is None or prefetch_mode == 'cpu': prefetcher = CPUPrefetcher(train_loader) elif prefetch_mode == 'cuda': prefetcher = CUDAPrefetcher(train_loader, opt) logger.info(f'Use {prefetch_mode} prefetch dataloader') if opt['datasets']['train'].get('pin_memory') is not True: raise ValueError('Please set pin_memory=True for CUDAPrefetcher.') else: raise ValueError(f"Wrong prefetch_mode {prefetch_mode}. 
Supported ones are: None, 'cuda', 'cpu'.") # training logger.info(f'Start training from epoch: {start_epoch}, iter: {current_iter}') data_timer, iter_timer = AvgTimer(), AvgTimer() start_time = time.time() for epoch in range(start_epoch, total_epochs + 1): train_sampler.set_epoch(epoch) prefetcher.reset() train_data = prefetcher.next() while train_data is not None: data_timer.record() current_iter += 1 if current_iter > total_iters: break # update learning rate model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', -1)) # training model.feed_data(train_data) model.optimize_parameters(current_iter) iter_timer.record() if current_iter == 1: # reset start time in msg_logger for more accurate eta_time # not work in resume mode msg_logger.reset_start_time() # log if current_iter % opt['logger']['print_freq'] == 0: log_vars = {'epoch': epoch, 'iter': current_iter} log_vars.update({'lrs': model.get_current_learning_rate()}) log_vars.update({'time': iter_timer.get_avg_time(), 'data_time': data_timer.get_avg_time()}) log_vars.update(model.get_current_log()) msg_logger(log_vars) # save models and training states if current_iter % opt['logger']['save_checkpoint_freq'] == 0: logger.info('Saving models and training states.') model.save(epoch, current_iter) # validation if opt.get('val') is not None and (current_iter % opt['val']['val_freq'] == 0): if len(val_loaders) > 1: logger.warning('Multiple validation datasets are *only* supported by SRModel.') for val_loader in val_loaders: model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img']) data_timer.start() iter_timer.start() train_data = prefetcher.next() # end of iter # end of epoch consumed_time = str(datetime.timedelta(seconds=int(time.time() - start_time))) logger.info(f'End of training. 
Time consumed: {consumed_time}') logger.info('Save the latest model.') model.save(epoch=-1, current_iter=-1) # -1 stands for the latest if opt.get('val') is not None: for val_loader in val_loaders: model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img']) if tb_logger: tb_logger.close()
null
2,576
def r1_penalty(real_pred, real_img):
    """R1 regularization for the discriminator.

    Penalizes the discriminator gradient on real samples alone: at the GAN
    equilibrium the discriminator cannot create a non-zero gradient
    orthogonal to the data manifold without suffering a loss. See Eq. 9 of
    "Which Training Methods for GANs do actually Converge?".

    Args:
        real_pred (Tensor): Discriminator scores for the real batch.
        real_img (Tensor): Real images; must have ``requires_grad=True``.

    Returns:
        Tensor: Scalar penalty (mean per-sample squared gradient norm).
    """
    (grad_wrt_real,) = autograd.grad(
        outputs=real_pred.sum(), inputs=real_img, create_graph=True)
    # Flatten each sample's gradient and take its squared L2 norm.
    per_sample_sq_norm = grad_wrt_real.reshape(grad_wrt_real.shape[0], -1).pow(2).sum(1)
    return per_sample_sq_norm.mean()
R1 regularization for discriminator. The core idea is to penalize the gradient on real data alone: when the generator distribution produces the true data distribution and the discriminator is equal to 0 on the data manifold, the gradient penalty ensures that the discriminator cannot create a non-zero gradient orthogonal to the data manifold without suffering a loss in the GAN game. Reference: Eq. 9 in Which training methods for GANs do actually converge.
2,577
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
    """Path-length regularization for the generator (StyleGAN2).

    Encourages a fixed-magnitude change in the image for a fixed-magnitude
    step in latent space by penalizing deviation of the Jacobian-projection
    norm from a running mean.

    Args:
        fake_img (Tensor): Generated images of shape (b, c, h, w).
        latents (Tensor): Latent codes the images were generated from.
        mean_path_length (Tensor | float): Running mean of path lengths.
        decay (float): EMA decay for the running mean. Default: 0.01.

    Returns:
        tuple: (path penalty, detached batch-mean path length,
        detached updated running mean).
    """
    # Random probe scaled so the expected magnitude is resolution-independent.
    probe = torch.randn_like(fake_img) / math.sqrt(fake_img.shape[2] * fake_img.shape[3])
    jac_proj = autograd.grad(
        outputs=(fake_img * probe).sum(), inputs=latents, create_graph=True)[0]
    lengths = torch.sqrt(jac_proj.pow(2).sum(2).mean(1))
    running_mean = mean_path_length + decay * (lengths.mean() - mean_path_length)
    penalty = (lengths - running_mean).pow(2).mean()
    return penalty, lengths.detach().mean(), running_mean.detach()
null
2,578
def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None):
    """Gradient penalty for WGAN-GP.

    Samples random points on the straight lines between real and fake
    samples and penalizes deviation of the discriminator's gradient norm
    from 1 at those points.

    Args:
        discriminator (nn.Module): Network for the discriminator.
        real_data (Tensor): Real input data.
        fake_data (Tensor): Fake input data.
        weight (Tensor): Optional element-wise weight. Default: None.

    Returns:
        Tensor: Scalar gradient-penalty loss.
    """
    num = real_data.size(0)
    mix = real_data.new_tensor(torch.rand(num, 1, 1, 1))
    # Random convex combination of real and fake samples.
    interp = autograd.Variable(mix * real_data + (1. - mix) * fake_data, requires_grad=True)
    score = discriminator(interp)
    grad = autograd.grad(
        outputs=score,
        inputs=interp,
        grad_outputs=torch.ones_like(score),
        create_graph=True,
        retain_graph=True,
        only_inputs=True)[0]
    if weight is not None:
        grad = grad * weight
    penalty = ((grad.norm(2, dim=1) - 1)**2).mean()
    if weight is not None:
        penalty /= torch.mean(weight)
    return penalty
Calculate gradient penalty for wgan-gp. Args: discriminator (nn.Module): Network for the discriminator. real_data (Tensor): Real input data. fake_data (Tensor): Fake input data. weight (Tensor): Weight tensor. Default: None. Returns: Tensor: A tensor for gradient penalty.
2,579
def weight_reduce_loss(loss, weight=None, reduction='mean'):
    """Apply an element-wise weight to ``loss`` and reduce it.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights; same ndim as ``loss`` with
            channel dim of 1 or matching ``loss``. Default: None.
        reduction (str): 'none', 'mean' or 'sum'. Default: 'mean'.

    Returns:
        Tensor: Reduced (or weighted element-wise) loss.
    """
    if weight is not None:
        assert weight.dim() == loss.dim()
        assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
        loss = loss * weight
    if weight is None or reduction == 'sum':
        # Unweighted, or plain sum: delegate to the generic reducer.
        return reduce_loss(loss, reduction)
    if reduction == 'mean':
        # Mean over the weighted region only: a single-channel weight is
        # broadcast over all channels, so scale its sum accordingly.
        denom = weight.sum() if weight.size(1) > 1 else weight.sum() * loss.size(1)
        return loss.sum() / denom
    # reduction == 'none': return the weighted element-wise loss.
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given element-wise loss function.

    The wrapped function must have the signature
    ``loss_func(pred, target, **kwargs)`` and return an element-wise loss
    with no reduction. The decorator adds ``weight=None`` and
    ``reduction='mean'`` arguments handled by :func:`weight_reduce_loss`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.5000)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, reduction='sum')
    tensor(3.)
    """

    @functools.wraps(loss_func)
    def wrapper(pred, target, weight=None, reduction='mean', **kwargs):
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction)

    return wrapper
Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.5000) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, reduction='sum') tensor(3.)
2,580
def get_local_weights(residual, ksize):
    """Local (windowed) variance of a residual map, used by LDL.

    Only called by :func:`get_refined_artifact_map`.

    Args:
        residual (Tensor): Residual between predicted and ground-truth
            images, shape (b, 1, h, w).
        ksize (int): Size of the local window.

    Returns:
        Tensor: Per-pixel artifact weight, shape (b, 1, h, w).
    """
    margin = (ksize - 1) // 2
    # Reflect-pad so every pixel owns a full ksize x ksize neighborhood.
    padded = F.pad(residual, pad=[margin, margin, margin, margin], mode='reflect')
    windows = padded.unfold(2, ksize, 1).unfold(3, ksize, 1)
    local_var = torch.var(windows, dim=(-1, -2), unbiased=True, keepdim=True)
    return local_var.squeeze(-1).squeeze(-1)


def get_refined_artifact_map(img_gt, img_output, img_ema, ksize):
    """Artifact map of LDL (Details or Artifacts: A Locally Discriminative
    Learning Approach to Realistic Image Super-Resolution, CVPR 2022).

    Args:
        img_gt (Tensor): Ground-truth images.
        img_output (Tensor): Output images from the optimizing model.
        img_ema (Tensor): Output images from the EMA model.
        ksize (int): Size of the local window.

    Returns:
        Tensor: Per-pixel weight for discriminating artifact pixels,
        combining a global (patch-level) and a local observation.
    """
    ema_residual = torch.sum(torch.abs(img_gt - img_ema), 1, keepdim=True)
    sr_residual = torch.sum(torch.abs(img_gt - img_output), 1, keepdim=True)
    # Global observation: fifth root of the whole-patch variance.
    patch_weight = torch.var(sr_residual.clone(), dim=(-1, -2, -3), keepdim=True)**(1 / 5)
    local_weight = get_local_weights(sr_residual.clone(), ksize)
    combined = patch_weight * local_weight
    # Pixels where the current model already beats the EMA model are not
    # treated as artifacts.
    combined[sr_residual < ema_residual] = 0
    return combined
Calculate the artifact map of LDL (Details or Artifacts: A Locally Discriminative Learning Approach to Realistic Image Super-Resolution. In CVPR 2022) Args: img_gt (Tensor): ground truth images. img_output (Tensor): output images given by the optimizing model. img_ema (Tensor): output images given by the ema model. ksize (Int): size of the local window. Returns: overall_weight: weight for each pixel to be discriminated as an artifact pixel (calculated based on both local and global observations).
2,581
def l1_loss(pred, target):
    """Element-wise L1 distance between ``pred`` and ``target``.

    Reduction is deliberately 'none'; it is applied later by the
    ``weighted_loss`` decorator.
    """
    elementwise = F.l1_loss(pred, target, reduction='none')
    return elementwise
null
2,582
def mse_loss(pred, target):
    """Element-wise squared error between ``pred`` and ``target``.

    Reduction is deliberately 'none'; it is applied later by the
    ``weighted_loss`` decorator.
    """
    elementwise = F.mse_loss(pred, target, reduction='none')
    return elementwise
null
2,583
def charbonnier_loss(pred, target, eps=1e-12):
    """Element-wise Charbonnier loss (a differentiable variant of L1).

    Args:
        pred (Tensor): Prediction.
        target (Tensor): Ground truth.
        eps (float): Small constant keeping the sqrt differentiable at 0.
            Default: 1e-12.

    Returns:
        Tensor: sqrt((pred - target)^2 + eps), element-wise.
    """
    squared_diff = (pred - target) ** 2
    return torch.sqrt(squared_diff + eps)
null
2,584
import cv2 import math import numpy as np import os from scipy.ndimage import convolve from scipy.special import gamma from basicsr.metrics.metric_util import reorder_image, to_y_channel from basicsr.utils.matlab_functions import imresize from basicsr.utils.registry import METRIC_REGISTRY def niqe(img, mu_pris_param, cov_pris_param, gaussian_window, block_size_h=96, block_size_w=96): """Calculate NIQE (Natural Image Quality Evaluator) metric. ``Paper: Making a "Completely Blind" Image Quality Analyzer`` This implementation could produce almost the same results as the official MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip Note that we do not include block overlap height and width, since they are always 0 in the official implementation. For good performance, it is advisable by the official implementation to divide the distorted image in to the same size patched as used for the construction of multivariate Gaussian model. Args: img (ndarray): Input image whose quality needs to be computed. The image must be a gray or Y (of YCbCr) image with shape (h, w). Range [0, 255] with float type. mu_pris_param (ndarray): Mean of a pre-defined multivariate Gaussian model calculated on the pristine dataset. cov_pris_param (ndarray): Covariance of a pre-defined multivariate Gaussian model calculated on the pristine dataset. gaussian_window (ndarray): A 7x7 Gaussian window used for smoothing the image. block_size_h (int): Height of the blocks in to which image is divided. Default: 96 (the official recommended value). block_size_w (int): Width of the blocks in to which image is divided. Default: 96 (the official recommended value). 
""" assert img.ndim == 2, ('Input image must be a gray or Y (of YCbCr) image with shape (h, w).') # crop image h, w = img.shape num_block_h = math.floor(h / block_size_h) num_block_w = math.floor(w / block_size_w) img = img[0:num_block_h * block_size_h, 0:num_block_w * block_size_w] distparam = [] # dist param is actually the multiscale features for scale in (1, 2): # perform on two scales (1, 2) mu = convolve(img, gaussian_window, mode='nearest') sigma = np.sqrt(np.abs(convolve(np.square(img), gaussian_window, mode='nearest') - np.square(mu))) # normalize, as in Eq. 1 in the paper img_nomalized = (img - mu) / (sigma + 1) feat = [] for idx_w in range(num_block_w): for idx_h in range(num_block_h): # process ecah block block = img_nomalized[idx_h * block_size_h // scale:(idx_h + 1) * block_size_h // scale, idx_w * block_size_w // scale:(idx_w + 1) * block_size_w // scale] feat.append(compute_feature(block)) distparam.append(np.array(feat)) if scale == 1: img = imresize(img / 255., scale=0.5, antialiasing=True) img = img * 255. distparam = np.concatenate(distparam, axis=1) # fit a MVG (multivariate Gaussian) model to distorted patch features mu_distparam = np.nanmean(distparam, axis=0) # use nancov. ref: https://ww2.mathworks.cn/help/stats/nancov.html distparam_no_nan = distparam[~np.isnan(distparam).any(axis=1)] cov_distparam = np.cov(distparam_no_nan, rowvar=False) # compute niqe quality, Eq. 10 in the paper invcov_param = np.linalg.pinv((cov_pris_param + cov_distparam) / 2) quality = np.matmul( np.matmul((mu_pris_param - mu_distparam), invcov_param), np.transpose((mu_pris_param - mu_distparam))) quality = np.sqrt(quality) quality = float(np.squeeze(quality)) return quality def reorder_image(img, input_order='HWC'): """Reorder images to 'HWC' order. If the input_order is (h, w), return (h, w, 1); If the input_order is (c, h, w), return (h, w, c); If the input_order is (h, w, c), return as it is. Args: img (ndarray): Input image. 
input_order (str): Whether the input order is 'HWC' or 'CHW'. If the input image shape is (h, w), input_order will not have effects. Default: 'HWC'. Returns: ndarray: reordered image. """ if input_order not in ['HWC', 'CHW']: raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'") if len(img.shape) == 2: img = img[..., None] if input_order == 'CHW': img = img.transpose(1, 2, 0) return img def to_y_channel(img): """Change to Y channel of YCbCr. Args: img (ndarray): Images with range [0, 255]. Returns: (ndarray): Images with range [0, 255] (float type) without round. """ img = img.astype(np.float32) / 255. if img.ndim == 3 and img.shape[2] == 3: img = bgr2ycbcr(img, y_only=True) img = img[..., None] return img * 255. The provided code snippet includes necessary dependencies for implementing the `calculate_niqe` function. Write a Python function `def calculate_niqe(img, crop_border, input_order='HWC', convert_to='y', **kwargs)` to solve the following problem: Calculate NIQE (Natural Image Quality Evaluator) metric. ``Paper: Making a "Completely Blind" Image Quality Analyzer`` This implementation could produce almost the same results as the official MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip > MATLAB R2021a result for tests/data/baboon.png: 5.72957338 (5.7296) > Our re-implementation result for tests/data/baboon.png: 5.7295763 (5.7296) We use the official params estimated from the pristine dataset. We use the recommended block size (96, 96) without overlaps. Args: img (ndarray): Input image whose quality needs to be computed. The input image must be in range [0, 255] with float/int type. The input_order of image can be 'HW' or 'HWC' or 'CHW'. (BGR order) If the input order is 'HWC' or 'CHW', it will be converted to gray or Y (of YCbCr) image according to the ``convert_to`` argument. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the metric calculation. 
input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'. Default: 'HWC'. convert_to (str): Whether converted to 'y' (of MATLAB YCbCr) or 'gray'. Default: 'y'. Returns: float: NIQE result. Here is the function: def calculate_niqe(img, crop_border, input_order='HWC', convert_to='y', **kwargs): """Calculate NIQE (Natural Image Quality Evaluator) metric. ``Paper: Making a "Completely Blind" Image Quality Analyzer`` This implementation could produce almost the same results as the official MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip > MATLAB R2021a result for tests/data/baboon.png: 5.72957338 (5.7296) > Our re-implementation result for tests/data/baboon.png: 5.7295763 (5.7296) We use the official params estimated from the pristine dataset. We use the recommended block size (96, 96) without overlaps. Args: img (ndarray): Input image whose quality needs to be computed. The input image must be in range [0, 255] with float/int type. The input_order of image can be 'HW' or 'HWC' or 'CHW'. (BGR order) If the input order is 'HWC' or 'CHW', it will be converted to gray or Y (of YCbCr) image according to the ``convert_to`` argument. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the metric calculation. input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'. Default: 'HWC'. convert_to (str): Whether converted to 'y' (of MATLAB YCbCr) or 'gray'. Default: 'y'. Returns: float: NIQE result. """ ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) # we use the official params estimated from the pristine dataset. 
niqe_pris_params = np.load(os.path.join(ROOT_DIR, 'niqe_pris_params.npz')) mu_pris_param = niqe_pris_params['mu_pris_param'] cov_pris_param = niqe_pris_params['cov_pris_param'] gaussian_window = niqe_pris_params['gaussian_window'] img = img.astype(np.float32) if input_order != 'HW': img = reorder_image(img, input_order=input_order) if convert_to == 'y': img = to_y_channel(img) elif convert_to == 'gray': img = cv2.cvtColor(img / 255., cv2.COLOR_BGR2GRAY) * 255. img = np.squeeze(img) if crop_border != 0: img = img[crop_border:-crop_border, crop_border:-crop_border] # round is necessary for being consistent with MATLAB's result img = img.round() niqe_result = niqe(img, mu_pris_param, cov_pris_param, gaussian_window) return niqe_result
Calculate NIQE (Natural Image Quality Evaluator) metric. ``Paper: Making a "Completely Blind" Image Quality Analyzer`` This implementation could produce almost the same results as the official MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip > MATLAB R2021a result for tests/data/baboon.png: 5.72957338 (5.7296) > Our re-implementation result for tests/data/baboon.png: 5.7295763 (5.7296) We use the official params estimated from the pristine dataset. We use the recommended block size (96, 96) without overlaps. Args: img (ndarray): Input image whose quality needs to be computed. The input image must be in range [0, 255] with float/int type. The input_order of image can be 'HW' or 'HWC' or 'CHW'. (BGR order) If the input order is 'HWC' or 'CHW', it will be converted to gray or Y (of YCbCr) image according to the ``convert_to`` argument. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the metric calculation. input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'. Default: 'HWC'. convert_to (str): Whether converted to 'y' (of MATLAB YCbCr) or 'gray'. Default: 'y'. Returns: float: NIQE result.
2,585
import numpy as np import torch import torch.nn as nn from scipy import linalg from tqdm import tqdm from basicsr.archs.inception import InceptionV3 class InceptionV3(nn.Module): """Pretrained InceptionV3 network returning feature maps""" # Index of default block of inception to return, # corresponds to output of final average pooling DEFAULT_BLOCK_INDEX = 3 # Maps feature dimensionality to their output blocks indices BLOCK_INDEX_BY_DIM = { 64: 0, # First max pooling features 192: 1, # Second max pooling features 768: 2, # Pre-aux classifier features 2048: 3 # Final average pooling features } def __init__(self, output_blocks=(DEFAULT_BLOCK_INDEX), resize_input=True, normalize_input=True, requires_grad=False, use_fid_inception=True): """Build pretrained InceptionV3. Args: output_blocks (list[int]): Indices of blocks to return features of. Possible values are: - 0: corresponds to output of first max pooling - 1: corresponds to output of second max pooling - 2: corresponds to output which is fed to aux classifier - 3: corresponds to output of final average pooling resize_input (bool): If true, bilinearly resizes input to width and height 299 before feeding input to model. As the network without fully connected layers is fully convolutional, it should be able to handle inputs of arbitrary size, so resizing might not be strictly needed. Default: True. normalize_input (bool): If true, scales the input from range (0, 1) to the range the pretrained Inception network expects, namely (-1, 1). Default: True. requires_grad (bool): If true, parameters of the model require gradients. Possibly useful for finetuning the network. Default: False. use_fid_inception (bool): If true, uses the pretrained Inception model used in Tensorflow's FID implementation. If false, uses the pretrained Inception model available in torchvision. The FID Inception model has different weights and a slightly different structure from torchvision's Inception model. 
If you want to compute FID scores, you are strongly advised to set this parameter to true to get comparable results. Default: True. """ super(InceptionV3, self).__init__() self.resize_input = resize_input self.normalize_input = normalize_input self.output_blocks = sorted(output_blocks) self.last_needed_block = max(output_blocks) assert self.last_needed_block <= 3, ('Last possible output block index is 3') self.blocks = nn.ModuleList() if use_fid_inception: inception = fid_inception_v3() else: try: inception = models.inception_v3(pretrained=True, init_weights=False) except TypeError: # pytorch < 1.5 does not have init_weights for inception_v3 inception = models.inception_v3(pretrained=True) # Block 0: input to maxpool1 block0 = [ inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3, inception.Conv2d_2b_3x3, nn.MaxPool2d(kernel_size=3, stride=2) ] self.blocks.append(nn.Sequential(*block0)) # Block 1: maxpool1 to maxpool2 if self.last_needed_block >= 1: block1 = [inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3, nn.MaxPool2d(kernel_size=3, stride=2)] self.blocks.append(nn.Sequential(*block1)) # Block 2: maxpool2 to aux classifier if self.last_needed_block >= 2: block2 = [ inception.Mixed_5b, inception.Mixed_5c, inception.Mixed_5d, inception.Mixed_6a, inception.Mixed_6b, inception.Mixed_6c, inception.Mixed_6d, inception.Mixed_6e, ] self.blocks.append(nn.Sequential(*block2)) # Block 3: aux classifier to final avgpool if self.last_needed_block >= 3: block3 = [ inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c, nn.AdaptiveAvgPool2d(output_size=(1, 1)) ] self.blocks.append(nn.Sequential(*block3)) for param in self.parameters(): param.requires_grad = requires_grad def forward(self, x): """Get Inception feature maps. Args: x (Tensor): Input tensor of shape (b, 3, h, w). Values are expected to be in range (-1, 1). You can also input (0, 1) with setting normalize_input = True. Returns: list[Tensor]: Corresponding to the selected output block, sorted ascending by index. 
""" output = [] if self.resize_input: x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False) if self.normalize_input: x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1) for idx, block in enumerate(self.blocks): x = block(x) if idx in self.output_blocks: output.append(x) if idx == self.last_needed_block: break return output def load_patched_inception_v3(device='cuda', resize_input=True, normalize_input=False): # we may not resize the input, but in [rosinality/stylegan2-pytorch] it # does resize the input. inception = InceptionV3([3], resize_input=resize_input, normalize_input=normalize_input) inception = nn.DataParallel(inception).eval().to(device) return inception
null
2,586
import numpy as np import torch import torch.nn as nn from scipy import linalg from tqdm import tqdm from basicsr.archs.inception import InceptionV3 The provided code snippet includes necessary dependencies for implementing the `extract_inception_features` function. Write a Python function `def extract_inception_features(data_generator, inception, len_generator=None, device='cuda')` to solve the following problem: Extract inception features. Args: data_generator (generator): A data generator. inception (nn.Module): Inception model. len_generator (int): Length of the data_generator to show the progressbar. Default: None. device (str): Device. Default: cuda. Returns: Tensor: Extracted features. Here is the function: def extract_inception_features(data_generator, inception, len_generator=None, device='cuda'): """Extract inception features. Args: data_generator (generator): A data generator. inception (nn.Module): Inception model. len_generator (int): Length of the data_generator to show the progressbar. Default: None. device (str): Device. Default: cuda. Returns: Tensor: Extracted features. """ if len_generator is not None: pbar = tqdm(total=len_generator, unit='batch', desc='Extract') else: pbar = None features = [] for data in data_generator: if pbar: pbar.update(1) data = data.to(device) feature = inception(data)[0].view(data.shape[0], -1) features.append(feature.to('cpu')) if pbar: pbar.close() features = torch.cat(features, 0) return features
Extract inception features. Args: data_generator (generator): A data generator. inception (nn.Module): Inception model. len_generator (int): Length of the data_generator to show the progressbar. Default: None. device (str): Device. Default: cuda. Returns: Tensor: Extracted features.
2,587
import numpy as np import torch import torch.nn as nn from scipy import linalg from tqdm import tqdm from basicsr.archs.inception import InceptionV3 The provided code snippet includes necessary dependencies for implementing the `calculate_fid` function. Write a Python function `def calculate_fid(mu1, sigma1, mu2, sigma2, eps=1e-6)` to solve the following problem: Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is: d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Args: mu1 (np.array): The sample mean over activations. sigma1 (np.array): The covariance matrix over activations for generated samples. mu2 (np.array): The sample mean over activations, precalculated on an representative data set. sigma2 (np.array): The covariance matrix over activations, precalculated on an representative data set. Returns: float: The Frechet Distance. Here is the function: def calculate_fid(mu1, sigma1, mu2, sigma2, eps=1e-6): """Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is: d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Args: mu1 (np.array): The sample mean over activations. sigma1 (np.array): The covariance matrix over activations for generated samples. mu2 (np.array): The sample mean over activations, precalculated on an representative data set. sigma2 (np.array): The covariance matrix over activations, precalculated on an representative data set. Returns: float: The Frechet Distance. 
""" assert mu1.shape == mu2.shape, 'Two mean vectors have different lengths' assert sigma1.shape == sigma2.shape, ('Two covariances have different dimensions') cov_sqrt, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False) # Product might be almost singular if not np.isfinite(cov_sqrt).all(): print('Product of cov matrices is singular. Adding {eps} to diagonal of cov estimates') offset = np.eye(sigma1.shape[0]) * eps cov_sqrt = linalg.sqrtm((sigma1 + offset) @ (sigma2 + offset)) # Numerical error might give slight imaginary component if np.iscomplexobj(cov_sqrt): if not np.allclose(np.diagonal(cov_sqrt).imag, 0, atol=1e-3): m = np.max(np.abs(cov_sqrt.imag)) raise ValueError(f'Imaginary component {m}') cov_sqrt = cov_sqrt.real mean_diff = mu1 - mu2 mean_norm = mean_diff @ mean_diff trace = np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(cov_sqrt) fid = mean_norm + trace return fid
Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is: d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Args: mu1 (np.array): The sample mean over activations. sigma1 (np.array): The covariance matrix over activations for generated samples. mu2 (np.array): The sample mean over activations, precalculated on an representative data set. sigma2 (np.array): The covariance matrix over activations, precalculated on an representative data set. Returns: float: The Frechet Distance.
2,588
import cv2 import numpy as np import torch import torch.nn.functional as F from basicsr.metrics.metric_util import reorder_image, to_y_channel from basicsr.utils.color_util import rgb2ycbcr_pt from basicsr.utils.registry import METRIC_REGISTRY def reorder_image(img, input_order='HWC'): """Reorder images to 'HWC' order. If the input_order is (h, w), return (h, w, 1); If the input_order is (c, h, w), return (h, w, c); If the input_order is (h, w, c), return as it is. Args: img (ndarray): Input image. input_order (str): Whether the input order is 'HWC' or 'CHW'. If the input image shape is (h, w), input_order will not have effects. Default: 'HWC'. Returns: ndarray: reordered image. """ if input_order not in ['HWC', 'CHW']: raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'") if len(img.shape) == 2: img = img[..., None] if input_order == 'CHW': img = img.transpose(1, 2, 0) return img def to_y_channel(img): """Change to Y channel of YCbCr. Args: img (ndarray): Images with range [0, 255]. Returns: (ndarray): Images with range [0, 255] (float type) without round. """ img = img.astype(np.float32) / 255. if img.ndim == 3 and img.shape[2] == 3: img = bgr2ycbcr(img, y_only=True) img = img[..., None] return img * 255. The provided code snippet includes necessary dependencies for implementing the `calculate_psnr` function. Write a Python function `def calculate_psnr(img, img2, crop_border, input_order='HWC', test_y_channel=False, **kwargs)` to solve the following problem: Calculate PSNR (Peak Signal-to-Noise Ratio). Reference: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio Args: img (ndarray): Images with range [0, 255]. img2 (ndarray): Images with range [0, 255]. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. input_order (str): Whether the input order is 'HWC' or 'CHW'. Default: 'HWC'. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. 
Returns: float: PSNR result. Here is the function: def calculate_psnr(img, img2, crop_border, input_order='HWC', test_y_channel=False, **kwargs): """Calculate PSNR (Peak Signal-to-Noise Ratio). Reference: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio Args: img (ndarray): Images with range [0, 255]. img2 (ndarray): Images with range [0, 255]. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. input_order (str): Whether the input order is 'HWC' or 'CHW'. Default: 'HWC'. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: PSNR result. """ assert img.shape == img2.shape, (f'Image shapes are different: {img.shape}, {img2.shape}.') if input_order not in ['HWC', 'CHW']: raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are "HWC" and "CHW"') img = reorder_image(img, input_order=input_order) img2 = reorder_image(img2, input_order=input_order) if crop_border != 0: img = img[crop_border:-crop_border, crop_border:-crop_border, ...] img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] if test_y_channel: img = to_y_channel(img) img2 = to_y_channel(img2) img = img.astype(np.float64) img2 = img2.astype(np.float64) mse = np.mean((img - img2)**2) if mse == 0: return float('inf') return 10. * np.log10(255. * 255. / mse)
Calculate PSNR (Peak Signal-to-Noise Ratio). Reference: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio Args: img (ndarray): Images with range [0, 255]. img2 (ndarray): Images with range [0, 255]. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. input_order (str): Whether the input order is 'HWC' or 'CHW'. Default: 'HWC'. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: PSNR result.
2,589
import cv2 import numpy as np import torch import torch.nn.functional as F from basicsr.metrics.metric_util import reorder_image, to_y_channel from basicsr.utils.color_util import rgb2ycbcr_pt from basicsr.utils.registry import METRIC_REGISTRY def rgb2ycbcr_pt(img, y_only=False): """Convert RGB images to YCbCr images (PyTorch version). It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. Args: img (Tensor): Images with shape (n, 3, h, w), the range [0, 1], float, RGB format. y_only (bool): Whether to only return Y channel. Default: False. Returns: (Tensor): converted images with the shape (n, 3/1, h, w), the range [0, 1], float. """ if y_only: weight = torch.tensor([[65.481], [128.553], [24.966]]).to(img) out_img = torch.matmul(img.permute(0, 2, 3, 1), weight).permute(0, 3, 1, 2) + 16.0 else: weight = torch.tensor([[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], [24.966, 112.0, -18.214]]).to(img) bias = torch.tensor([16, 128, 128]).view(1, 3, 1, 1).to(img) out_img = torch.matmul(img.permute(0, 2, 3, 1), weight).permute(0, 3, 1, 2) + bias out_img = out_img / 255. return out_img The provided code snippet includes necessary dependencies for implementing the `calculate_psnr_pt` function. Write a Python function `def calculate_psnr_pt(img, img2, crop_border, test_y_channel=False, **kwargs)` to solve the following problem: Calculate PSNR (Peak Signal-to-Noise Ratio) (PyTorch version). Reference: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio Args: img (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). img2 (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: PSNR result. 
Here is the function: def calculate_psnr_pt(img, img2, crop_border, test_y_channel=False, **kwargs): """Calculate PSNR (Peak Signal-to-Noise Ratio) (PyTorch version). Reference: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio Args: img (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). img2 (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: PSNR result. """ assert img.shape == img2.shape, (f'Image shapes are different: {img.shape}, {img2.shape}.') if crop_border != 0: img = img[:, :, crop_border:-crop_border, crop_border:-crop_border] img2 = img2[:, :, crop_border:-crop_border, crop_border:-crop_border] if test_y_channel: img = rgb2ycbcr_pt(img, y_only=True) img2 = rgb2ycbcr_pt(img2, y_only=True) img = img.to(torch.float64) img2 = img2.to(torch.float64) mse = torch.mean((img - img2)**2, dim=[1, 2, 3]) return 10. * torch.log10(1. / (mse + 1e-8))
Calculate PSNR (Peak Signal-to-Noise Ratio) (PyTorch version). Reference: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio Args: img (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). img2 (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: PSNR result.
2,590
import cv2 import numpy as np import torch import torch.nn.functional as F from basicsr.metrics.metric_util import reorder_image, to_y_channel from basicsr.utils.color_util import rgb2ycbcr_pt from basicsr.utils.registry import METRIC_REGISTRY def _ssim(img, img2): """Calculate SSIM (structural similarity) for one channel images. It is called by func:`calculate_ssim`. Args: img (ndarray): Images with range [0, 255] with order 'HWC'. img2 (ndarray): Images with range [0, 255] with order 'HWC'. Returns: float: SSIM result. """ c1 = (0.01 * 255)**2 c2 = (0.03 * 255)**2 kernel = cv2.getGaussianKernel(11, 1.5) window = np.outer(kernel, kernel.transpose()) mu1 = cv2.filter2D(img, -1, window)[5:-5, 5:-5] # valid mode for window size 11 mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] mu1_sq = mu1**2 mu2_sq = mu2**2 mu1_mu2 = mu1 * mu2 sigma1_sq = cv2.filter2D(img**2, -1, window)[5:-5, 5:-5] - mu1_sq sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq sigma12 = cv2.filter2D(img * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 ssim_map = ((2 * mu1_mu2 + c1) * (2 * sigma12 + c2)) / ((mu1_sq + mu2_sq + c1) * (sigma1_sq + sigma2_sq + c2)) return ssim_map.mean() def reorder_image(img, input_order='HWC'): """Reorder images to 'HWC' order. If the input_order is (h, w), return (h, w, 1); If the input_order is (c, h, w), return (h, w, c); If the input_order is (h, w, c), return as it is. Args: img (ndarray): Input image. input_order (str): Whether the input order is 'HWC' or 'CHW'. If the input image shape is (h, w), input_order will not have effects. Default: 'HWC'. Returns: ndarray: reordered image. """ if input_order not in ['HWC', 'CHW']: raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'") if len(img.shape) == 2: img = img[..., None] if input_order == 'CHW': img = img.transpose(1, 2, 0) return img def to_y_channel(img): """Change to Y channel of YCbCr. Args: img (ndarray): Images with range [0, 255]. 
Returns: (ndarray): Images with range [0, 255] (float type) without round. """ img = img.astype(np.float32) / 255. if img.ndim == 3 and img.shape[2] == 3: img = bgr2ycbcr(img, y_only=True) img = img[..., None] return img * 255. The provided code snippet includes necessary dependencies for implementing the `calculate_ssim` function. Write a Python function `def calculate_ssim(img, img2, crop_border, input_order='HWC', test_y_channel=False, **kwargs)` to solve the following problem: Calculate SSIM (structural similarity). ``Paper: Image quality assessment: From error visibility to structural similarity`` The results are the same as that of the official released MATLAB code in https://ece.uwaterloo.ca/~z70wang/research/ssim/. For three-channel images, SSIM is calculated for each channel and then averaged. Args: img (ndarray): Images with range [0, 255]. img2 (ndarray): Images with range [0, 255]. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. input_order (str): Whether the input order is 'HWC' or 'CHW'. Default: 'HWC'. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: SSIM result. Here is the function: def calculate_ssim(img, img2, crop_border, input_order='HWC', test_y_channel=False, **kwargs): """Calculate SSIM (structural similarity). ``Paper: Image quality assessment: From error visibility to structural similarity`` The results are the same as that of the official released MATLAB code in https://ece.uwaterloo.ca/~z70wang/research/ssim/. For three-channel images, SSIM is calculated for each channel and then averaged. Args: img (ndarray): Images with range [0, 255]. img2 (ndarray): Images with range [0, 255]. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. input_order (str): Whether the input order is 'HWC' or 'CHW'. Default: 'HWC'. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. 
Returns: float: SSIM result. """ assert img.shape == img2.shape, (f'Image shapes are different: {img.shape}, {img2.shape}.') if input_order not in ['HWC', 'CHW']: raise ValueError(f'Wrong input_order {input_order}. Supported input_orders are "HWC" and "CHW"') img = reorder_image(img, input_order=input_order) img2 = reorder_image(img2, input_order=input_order) if crop_border != 0: img = img[crop_border:-crop_border, crop_border:-crop_border, ...] img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...] if test_y_channel: img = to_y_channel(img) img2 = to_y_channel(img2) img = img.astype(np.float64) img2 = img2.astype(np.float64) ssims = [] for i in range(img.shape[2]): ssims.append(_ssim(img[..., i], img2[..., i])) return np.array(ssims).mean()
Calculate SSIM (structural similarity). ``Paper: Image quality assessment: From error visibility to structural similarity`` The results are the same as that of the official released MATLAB code in https://ece.uwaterloo.ca/~z70wang/research/ssim/. For three-channel images, SSIM is calculated for each channel and then averaged. Args: img (ndarray): Images with range [0, 255]. img2 (ndarray): Images with range [0, 255]. crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. input_order (str): Whether the input order is 'HWC' or 'CHW'. Default: 'HWC'. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: SSIM result.
2,591
import cv2 import numpy as np import torch import torch.nn.functional as F from basicsr.metrics.metric_util import reorder_image, to_y_channel from basicsr.utils.color_util import rgb2ycbcr_pt from basicsr.utils.registry import METRIC_REGISTRY def _ssim_pth(img, img2): """Calculate SSIM (structural similarity) (PyTorch version). It is called by func:`calculate_ssim_pt`. Args: img (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). img2 (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). Returns: float: SSIM result. """ c1 = (0.01 * 255)**2 c2 = (0.03 * 255)**2 kernel = cv2.getGaussianKernel(11, 1.5) window = np.outer(kernel, kernel.transpose()) window = torch.from_numpy(window).view(1, 1, 11, 11).expand(img.size(1), 1, 11, 11).to(img.dtype).to(img.device) mu1 = F.conv2d(img, window, stride=1, padding=0, groups=img.shape[1]) # valid mode mu2 = F.conv2d(img2, window, stride=1, padding=0, groups=img2.shape[1]) # valid mode mu1_sq = mu1.pow(2) mu2_sq = mu2.pow(2) mu1_mu2 = mu1 * mu2 sigma1_sq = F.conv2d(img * img, window, stride=1, padding=0, groups=img.shape[1]) - mu1_sq sigma2_sq = F.conv2d(img2 * img2, window, stride=1, padding=0, groups=img.shape[1]) - mu2_sq sigma12 = F.conv2d(img * img2, window, stride=1, padding=0, groups=img.shape[1]) - mu1_mu2 cs_map = (2 * sigma12 + c2) / (sigma1_sq + sigma2_sq + c2) ssim_map = ((2 * mu1_mu2 + c1) / (mu1_sq + mu2_sq + c1)) * cs_map return ssim_map.mean([1, 2, 3]) def rgb2ycbcr_pt(img, y_only=False): """Convert RGB images to YCbCr images (PyTorch version). It implements the ITU-R BT.601 conversion for standard-definition television. See more details in https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion. Args: img (Tensor): Images with shape (n, 3, h, w), the range [0, 1], float, RGB format. y_only (bool): Whether to only return Y channel. Default: False. Returns: (Tensor): converted images with the shape (n, 3/1, h, w), the range [0, 1], float. 
""" if y_only: weight = torch.tensor([[65.481], [128.553], [24.966]]).to(img) out_img = torch.matmul(img.permute(0, 2, 3, 1), weight).permute(0, 3, 1, 2) + 16.0 else: weight = torch.tensor([[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], [24.966, 112.0, -18.214]]).to(img) bias = torch.tensor([16, 128, 128]).view(1, 3, 1, 1).to(img) out_img = torch.matmul(img.permute(0, 2, 3, 1), weight).permute(0, 3, 1, 2) + bias out_img = out_img / 255. return out_img The provided code snippet includes necessary dependencies for implementing the `calculate_ssim_pt` function. Write a Python function `def calculate_ssim_pt(img, img2, crop_border, test_y_channel=False, **kwargs)` to solve the following problem: Calculate SSIM (structural similarity) (PyTorch version). ``Paper: Image quality assessment: From error visibility to structural similarity`` The results are the same as that of the official released MATLAB code in https://ece.uwaterloo.ca/~z70wang/research/ssim/. For three-channel images, SSIM is calculated for each channel and then averaged. Args: img (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). img2 (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: SSIM result. Here is the function: def calculate_ssim_pt(img, img2, crop_border, test_y_channel=False, **kwargs): """Calculate SSIM (structural similarity) (PyTorch version). ``Paper: Image quality assessment: From error visibility to structural similarity`` The results are the same as that of the official released MATLAB code in https://ece.uwaterloo.ca/~z70wang/research/ssim/. For three-channel images, SSIM is calculated for each channel and then averaged. Args: img (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). img2 (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). 
crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: SSIM result. """ assert img.shape == img2.shape, (f'Image shapes are different: {img.shape}, {img2.shape}.') if crop_border != 0: img = img[:, :, crop_border:-crop_border, crop_border:-crop_border] img2 = img2[:, :, crop_border:-crop_border, crop_border:-crop_border] if test_y_channel: img = rgb2ycbcr_pt(img, y_only=True) img2 = rgb2ycbcr_pt(img2, y_only=True) img = img.to(torch.float64) img2 = img2.to(torch.float64) ssim = _ssim_pth(img * 255., img2 * 255.) return ssim
Calculate SSIM (structural similarity) (PyTorch version). ``Paper: Image quality assessment: From error visibility to structural similarity`` The results are the same as that of the official released MATLAB code in https://ece.uwaterloo.ca/~z70wang/research/ssim/. For three-channel images, SSIM is calculated for each channel and then averaged. Args: img (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). img2 (Tensor): Images with range [0, 1], shape (n, 3/1, h, w). crop_border (int): Cropped pixels in each edge of an image. These pixels are not involved in the calculation. test_y_channel (bool): Test on Y channel of YCbCr. Default: False. Returns: float: SSIM result.
2,592
import cv2 import random import torch The provided code snippet includes necessary dependencies for implementing the `paired_random_crop` function. Write a Python function `def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path=None)` to solve the following problem: Paired random crop. Support Numpy array and Tensor inputs. It crops lists of lq and gt images with corresponding locations. Args: img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. img_lqs (list[ndarray] | ndarray): LQ images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. gt_patch_size (int): GT patch size. scale (int): Scale factor. gt_path (str): Path to ground-truth. Default: None. Returns: list[ndarray] | ndarray: GT images and LQ images. If returned results only have one element, just return ndarray. Here is the function: def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path=None): """Paired random crop. Support Numpy array and Tensor inputs. It crops lists of lq and gt images with corresponding locations. Args: img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. img_lqs (list[ndarray] | ndarray): LQ images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. gt_patch_size (int): GT patch size. scale (int): Scale factor. gt_path (str): Path to ground-truth. Default: None. Returns: list[ndarray] | ndarray: GT images and LQ images. If returned results only have one element, just return ndarray. 
""" if not isinstance(img_gts, list): img_gts = [img_gts] if not isinstance(img_lqs, list): img_lqs = [img_lqs] # determine input type: Numpy array or Tensor input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy' if input_type == 'Tensor': h_lq, w_lq = img_lqs[0].size()[-2:] h_gt, w_gt = img_gts[0].size()[-2:] else: h_lq, w_lq = img_lqs[0].shape[0:2] h_gt, w_gt = img_gts[0].shape[0:2] lq_patch_size = gt_patch_size // scale if h_gt != h_lq * scale or w_gt != w_lq * scale: raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ', f'multiplication of LQ ({h_lq}, {w_lq}).') if h_lq < lq_patch_size or w_lq < lq_patch_size: raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size ' f'({lq_patch_size}, {lq_patch_size}). ' f'Please remove {gt_path}.') # randomly choose top and left coordinates for lq patch top = random.randint(0, h_lq - lq_patch_size) left = random.randint(0, w_lq - lq_patch_size) # crop lq patch if input_type == 'Tensor': img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs] else: img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs] # crop corresponding gt patch top_gt, left_gt = int(top * scale), int(left * scale) if input_type == 'Tensor': img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts] else: img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts] if len(img_gts) == 1: img_gts = img_gts[0] if len(img_lqs) == 1: img_lqs = img_lqs[0] return img_gts, img_lqs
Paired random crop. Support Numpy array and Tensor inputs. It crops lists of lq and gt images with corresponding locations. Args: img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. img_lqs (list[ndarray] | ndarray): LQ images. Note that all images should have the same shape. If the input is an ndarray, it will be transformed to a list containing itself. gt_patch_size (int): GT patch size. scale (int): Scale factor. gt_path (str): Path to ground-truth. Default: None. Returns: list[ndarray] | ndarray: GT images and LQ images. If returned results only have one element, just return ndarray.
2,593
import cv2 import random import torch def triplet_random_crop(img_gts, img_lqs, img_segs, gt_patch_size, scale, gt_path=None): if not isinstance(img_gts, list): img_gts = [img_gts] if not isinstance(img_lqs, list): img_lqs = [img_lqs] if not isinstance(img_segs, list): img_segs = [img_segs] # determine input type: Numpy array or Tensor input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy' if input_type == 'Tensor': h_lq, w_lq = img_lqs[0].size()[-2:] h_gt, w_gt = img_gts[0].size()[-2:] h_seg, w_seg = img_segs[0].size()[-2:] else: h_lq, w_lq = img_lqs[0].shape[0:2] h_gt, w_gt = img_gts[0].shape[0:2] h_seg, w_seg = img_segs[0].shape[0:2] lq_patch_size = gt_patch_size // scale if h_gt != h_lq * scale or w_gt != w_lq * scale: raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ', f'multiplication of LQ ({h_lq}, {w_lq}).') if h_lq < lq_patch_size or w_lq < lq_patch_size: raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size ' f'({lq_patch_size}, {lq_patch_size}). ' f'Please remove {gt_path}.') # randomly choose top and left coordinates for lq patch top = random.randint(0, h_lq - lq_patch_size) left = random.randint(0, w_lq - lq_patch_size) # crop lq patch if input_type == 'Tensor': img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs] else: img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs] # crop corresponding gt patch top_gt, left_gt = int(top * scale), int(left * scale) if input_type == 'Tensor': img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts] else: img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts] if input_type == 'Tensor': img_segs = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_segs] else: img_segs = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] 
for v in img_segs] if len(img_gts) == 1: img_gts = img_gts[0] if len(img_lqs) == 1: img_lqs = img_lqs[0] if len(img_segs) == 1: img_segs = img_segs[0] return img_gts, img_lqs, img_segs
null
2,594
import cv2
import random
import torch


def augment(imgs, hflip=True, rotation=True, flows=None, return_status=False):
    """Apply one shared random horizontal-flip / rotation to all images.

    Rotation by 0/90/180/270 degrees is implemented as an optional vertical
    flip followed by an optional transpose.

    Args:
        imgs (list[ndarray] | ndarray): Images to augment; a bare ndarray is
            treated as a one-element list.
        hflip (bool): Allow horizontal flipping. Default: True.
        rotation (bool): Allow rotation. Default: True.
        flows (list[ndarray] | ndarray, optional): Optical flows of shape
            (h, w, 2), augmented consistently with the images. Default: None.
        return_status (bool): Also return the sampled (hflip, vflip, rot90)
            flags. Default: False.

    Returns:
        list[ndarray] | ndarray: Augmented images (plus flows or status
        depending on arguments). One-element lists collapse to an ndarray.
    """
    # Sample one set of flags shared by every image and flow.
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rotation and random.random() < 0.5
    do_rot90 = rotation and random.random() < 0.5

    def _apply_img(image):
        if do_hflip:
            cv2.flip(image, 1, image)  # in-place horizontal flip
        if do_vflip:
            cv2.flip(image, 0, image)  # in-place vertical flip
        if do_rot90:
            image = image.transpose(1, 0, 2)
        return image

    def _apply_flow(flow):
        if do_hflip:
            cv2.flip(flow, 1, flow)
            flow[:, :, 0] *= -1  # horizontal component changes sign
        if do_vflip:
            cv2.flip(flow, 0, flow)
            flow[:, :, 1] *= -1  # vertical component changes sign
        if do_rot90:
            flow = flow.transpose(1, 0, 2)
            flow = flow[:, :, [1, 0]]  # swap (dx, dy) after transpose
        return flow

    if not isinstance(imgs, list):
        imgs = [imgs]
    imgs = [_apply_img(image) for image in imgs]
    if len(imgs) == 1:
        imgs = imgs[0]

    if flows is None:
        return (imgs, (do_hflip, do_vflip, do_rot90)) if return_status else imgs

    if not isinstance(flows, list):
        flows = [flows]
    flows = [_apply_flow(flow) for flow in flows]
    if len(flows) == 1:
        flows = flows[0]
    return imgs, flows
Augment: horizontal flips OR rotate (0, 90, 180, 270 degrees). We use vertical flip and transpose for rotation implementation. All the images in the list use the same augmentation. Args: imgs (list[ndarray] | ndarray): Images to be augmented. If the input is an ndarray, it will be transformed to a list. hflip (bool): Horizontal flip. Default: True. rotation (bool): Rotation. Default: True. flows (list[ndarray]): Flows to be augmented. If the input is an ndarray, it will be transformed to a list. Dimension is (h, w, 2). Default: None. return_status (bool): Return the status of flip and rotation. Default: False. Returns: list[ndarray] | ndarray: Augmented images and flows. If returned results only have one element, just return ndarray.
2,595
import cv2
import random
import torch


def img_rotate(img, angle, center=None, scale=1.0):
    """Rotate an image about a center point.

    Args:
        img (ndarray): Image to be rotated.
        angle (float): Rotation angle in degrees; positive values rotate
            counter-clockwise.
        center (tuple[int], optional): Rotation center; defaults to the
            image center.
        scale (float): Isotropic scale factor. Default: 1.0.

    Returns:
        ndarray: The rotated image, same size as the input.
    """
    height, width = img.shape[:2]
    if center is None:
        center = (width // 2, height // 2)
    rotation_matrix = cv2.getRotationMatrix2D(center, angle, scale)
    return cv2.warpAffine(img, rotation_matrix, (width, height))
Rotate image. Args: img (ndarray): Image to be rotated. angle (float): Rotation angle in degrees. Positive values mean counter-clockwise rotation. center (tuple[int]): Rotation center. If the center is None, initialize it as the center of the image. Default: None. scale (float): Isotropic scale factor. Default: 1.0.
2,596
import math
import random

import numpy as np
import torch
from scipy import special
from scipy.stats import multivariate_normal

np.seterr(divide='ignore', invalid='ignore')


def cdf2(d_matrix, grid):
    """Evaluate the skewed standard bivariate Gaussian CDF on a grid.

    Helper for skewed Gaussian kernels: the coordinate grid is sheared by
    ``d_matrix`` and the standard (zero-mean, identity-covariance) bivariate
    normal CDF is evaluated at the sheared coordinates.

    Args:
        d_matrix (ndarray): 2x2 skew matrix applied to the grid.
        grid (ndarray): Coordinate grid of shape (K, K, 2), as produced by
            ``mesh_grid``, where K is the kernel size.

    Returns:
        ndarray: Skewed CDF values of shape (K, K).
    """
    standard_normal = multivariate_normal([0, 0], [[1, 0], [0, 1]])
    sheared = np.dot(grid, d_matrix)
    return standard_normal.cdf(sheared)
Calculate the CDF of the standard bivariate Gaussian distribution. Used in skewed Gaussian distribution. Args: d_matrix (ndarray): skew matrix. grid (ndarray): generated by :func:`mesh_grid`, with the shape (K, K, 2), K is the kernel size. Returns: cdf (ndarray): skewed cdf.
2,597
import cv2
import math
import random

import numpy as np
import torch
from scipy import special
from scipy.stats import multivariate_normal
from torchvision.transforms.functional_tensor import rgb_to_grayscale


def random_bivariate_Gaussian(kernel_size, sigma_x_range, sigma_y_range, rotation_range,
                              noise_range=None, isotropic=True, return_sigma=False):
    """Randomly sample a bivariate isotropic/anisotropic Gaussian kernel.

    In isotropic mode only ``sigma_x_range`` is used; ``sigma_y_range`` and
    ``rotation_range`` are ignored.

    Args:
        kernel_size (int): Kernel size, must be odd.
        sigma_x_range (tuple): e.g. [0.6, 5].
        sigma_y_range (tuple): e.g. [0.6, 5]. Anisotropic mode only.
        rotation_range (tuple): e.g. [-math.pi, math.pi]. Anisotropic mode only.
        noise_range (tuple, optional): Multiplicative kernel-noise range,
            e.g. [0.75, 1.25]. Default: None (no noise).
        isotropic (bool): Sample an isotropic kernel. Default: True.
        return_sigma (bool): Also return the sampled [sigma_x, sigma_y].

    Returns:
        ndarray | tuple[ndarray, list]: Normalised kernel (and sigmas).
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        sigma_y = sigma_x
        rotation = 0

    kernel = bivariate_Gaussian(kernel_size, sigma_x, sigma_y, rotation, isotropic=isotropic)

    # Optional multiplicative noise, followed by re-normalisation.
    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)
    if return_sigma:
        return kernel, [sigma_x, sigma_y]
    return kernel


def random_bivariate_generalized_Gaussian(kernel_size, sigma_x_range, sigma_y_range, rotation_range,
                                          beta_range, noise_range=None, isotropic=True, return_sigma=False):
    """Randomly sample a bivariate generalized Gaussian kernel.

    In isotropic mode only ``sigma_x_range`` is used; ``sigma_y_range`` and
    ``rotation_range`` are ignored.

    Args:
        kernel_size (int): Kernel size, must be odd.
        sigma_x_range (tuple): e.g. [0.6, 5].
        sigma_y_range (tuple): e.g. [0.6, 5]. Anisotropic mode only.
        rotation_range (tuple): e.g. [-math.pi, math.pi]. Anisotropic mode only.
        beta_range (tuple): Shape-parameter range, e.g. [0.5, 8]; assumed to
            straddle 1 (see sampling below).
        noise_range (tuple, optional): Multiplicative kernel-noise range.
            Default: None.
        isotropic (bool): Sample an isotropic kernel. Default: True.
        return_sigma (bool): Also return the sampled [sigma_x, sigma_y].

    Returns:
        ndarray | tuple[ndarray, list]
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        sigma_y = sigma_x
        rotation = 0

    # assume beta_range[0] < 1 < beta_range[1]: sample below/above 1 with
    # equal probability
    if np.random.uniform() < 0.5:
        beta = np.random.uniform(beta_range[0], 1)
    else:
        beta = np.random.uniform(1, beta_range[1])

    kernel = bivariate_generalized_Gaussian(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)

    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)
    if return_sigma:
        return kernel, [sigma_x, sigma_y]
    return kernel


def random_bivariate_plateau(kernel_size, sigma_x_range, sigma_y_range, rotation_range,
                             beta_range, noise_range=None, isotropic=True, return_sigma=False):
    """Randomly sample a bivariate plateau kernel.

    In isotropic mode only ``sigma_x_range`` is used; ``sigma_y_range`` and
    ``rotation_range`` are ignored.

    Args:
        kernel_size (int): Kernel size, must be odd.
        sigma_x_range (tuple): e.g. [0.6, 5].
        sigma_y_range (tuple): e.g. [0.6, 5]. Anisotropic mode only.
        rotation_range (tuple): e.g. [-math.pi/2, math.pi/2]. Anisotropic only.
        beta_range (tuple): Shape-parameter range, e.g. [1, 4].
        noise_range (tuple, optional): Multiplicative kernel-noise range.
            Default: None.
        isotropic (bool): Sample an isotropic kernel. Default: True.
        return_sigma (bool): Also return the sampled [sigma_x, sigma_y].

    Returns:
        ndarray | tuple[ndarray, list]
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        sigma_y = sigma_x
        rotation = 0

    # TODO: this may be not proper (beta sampling assumes the range straddles 1)
    if np.random.uniform() < 0.5:
        beta = np.random.uniform(beta_range[0], 1)
    else:
        beta = np.random.uniform(1, beta_range[1])

    kernel = bivariate_plateau(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)

    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)
    if return_sigma:
        return kernel, [sigma_x, sigma_y]
    return kernel


def random_mixed_kernels(kernel_list,
                         kernel_prob,
                         kernel_size=21,
                         sigma_x_range=(0.6, 5),
                         sigma_y_range=(0.6, 5),
                         rotation_range=(-math.pi, math.pi),
                         betag_range=(0.5, 8),
                         betap_range=(0.5, 8),
                         noise_range=None,
                         return_sigma=False):
    """Randomly sample one kernel from a mixture of kernel families.

    The original implementation duplicated the whole dispatch for the
    ``return_sigma`` case; since every sampler already forwards
    ``return_sigma``, a single dispatch suffices.

    Args:
        kernel_list (tuple): Kernel type names; supports 'iso', 'aniso',
            'generalized_iso', 'generalized_aniso', 'plateau_iso',
            'plateau_aniso'.
        kernel_prob (tuple): Selection probability per kernel type.
        kernel_size (int): Kernel size, must be odd. Default: 21.
        sigma_x_range (tuple): e.g. [0.6, 5].
        sigma_y_range (tuple): e.g. [0.6, 5].
        rotation_range (tuple): e.g. [-math.pi, math.pi].
        betag_range (tuple): Beta range for generalized Gaussian kernels.
        betap_range (tuple): Beta range for plateau kernels.
        noise_range (tuple, optional): Multiplicative kernel-noise range.
            Default: None.
        return_sigma (bool): Also return the sampled [sigma_x, sigma_y].

    Returns:
        ndarray | tuple[ndarray, list]: The kernel (and sigmas when requested).

    Raises:
        ValueError: If an unsupported kernel type is drawn (the original
            silently hit UnboundLocalError here).
    """
    kernel_type = random.choices(kernel_list, kernel_prob)[0]
    if kernel_type == 'iso':
        result = random_bivariate_Gaussian(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range,
            noise_range=noise_range, isotropic=True, return_sigma=return_sigma)
    elif kernel_type == 'aniso':
        result = random_bivariate_Gaussian(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range,
            noise_range=noise_range, isotropic=False, return_sigma=return_sigma)
    elif kernel_type == 'generalized_iso':
        result = random_bivariate_generalized_Gaussian(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, betag_range,
            noise_range=noise_range, isotropic=True, return_sigma=return_sigma)
    elif kernel_type == 'generalized_aniso':
        result = random_bivariate_generalized_Gaussian(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, betag_range,
            noise_range=noise_range, isotropic=False, return_sigma=return_sigma)
    elif kernel_type == 'plateau_iso':
        # NOTE: plateau kernels ignore noise_range in the original
        # implementation -- kept for parity.
        result = random_bivariate_plateau(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range,
            noise_range=None, isotropic=True, return_sigma=return_sigma)
    elif kernel_type == 'plateau_aniso':
        result = random_bivariate_plateau(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range,
            noise_range=None, isotropic=False, return_sigma=return_sigma)
    else:
        raise ValueError(f'Unsupported kernel type: {kernel_type}')
    return result
Randomly generate mixed kernels. Args: kernel_list (tuple): a list name of kernel types, support ['iso', 'aniso', 'skew', 'generalized', 'plateau_iso', 'plateau_aniso'] kernel_prob (tuple): corresponding kernel probability for each kernel type kernel_size (int): sigma_x_range (tuple): [0.6, 5] sigma_y_range (tuple): [0.6, 5] rotation range (tuple): [-math.pi, math.pi] beta_range (tuple): [0.5, 8] noise_range(tuple, optional): multiplicative kernel noise, [0.75, 1.25]. Default: None Returns: kernel (ndarray):
2,598
import math
import random

import numpy as np
import torch
from scipy import special
from scipy.stats import multivariate_normal

np.seterr(divide='ignore', invalid='ignore')


def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):
    """Build a 2D sinc (circularly-symmetric low-pass) filter.

    Reference:
    https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter

    Args:
        cutoff (float): Cutoff frequency in radians (pi is the maximum).
        kernel_size (int): Horizontal and vertical size, must be odd.
        pad_to (int): Pad the kernel to this size (must be odd); 0 disables
            padding.

    Returns:
        ndarray: Normalised kernel, shape (pad_to, pad_to) when padding is
        requested, otherwise (kernel_size, kernel_size).
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    centre = (kernel_size - 1) / 2

    def _sinc2d(x, y):
        radius = np.sqrt((x - centre) ** 2 + (y - centre) ** 2)
        return cutoff * special.j1(cutoff * radius) / (2 * np.pi * radius)

    kernel = np.fromfunction(_sinc2d, [kernel_size, kernel_size])
    # The expression above is 0/0 at the centre pixel; fill in the analytic limit.
    kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = cutoff**2 / (4 * np.pi)
    kernel = kernel / np.sum(kernel)
    if pad_to > kernel_size:
        pad = (pad_to - kernel_size) // 2
        kernel = np.pad(kernel, ((pad, pad), (pad, pad)))
    return kernel
2D sinc filter Reference: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter Args: cutoff (float): cutoff frequency in radians (pi is max) kernel_size (int): horizontal and vertical size, must be odd. pad_to (int): pad kernel size to desired size, must be odd or zero.
2,599
import math
import random

import numpy as np
import torch
from scipy import special
from scipy.stats import multivariate_normal

np.seterr(divide='ignore', invalid='ignore')


def generate_gaussian_noise(img, sigma=10, gray_noise=False):
    """Sample Gaussian noise with the same shape as ``img``.

    Args:
        img (ndarray): Input image, shape (h, w, c), range [0, 1], float32.
        sigma (float): Noise sigma, measured on the [0, 255] scale. Default: 10.
        gray_noise (bool): Sample a single-channel noise map and repeat it
            over 3 channels. Default: False.

    Returns:
        ndarray: Noise map (not added to the image), float32.
    """
    if gray_noise:
        single = np.float32(np.random.randn(*img.shape[0:2])) * sigma / 255.
        return np.expand_dims(single, axis=2).repeat(3, axis=2)
    return np.float32(np.random.randn(*img.shape)) * sigma / 255.


def add_gaussian_noise(img, sigma=10, clip=True, rounds=False, gray_noise=False):
    """Add Gaussian noise to ``img``.

    Args:
        img (ndarray): Input image, shape (h, w, c), range [0, 1], float32.
        sigma (float): Noise sigma on the [0, 255] scale. Default: 10.
        clip (bool): Clip the result to [0, 1]. Default: True.
        rounds (bool): Quantise the result to 1/255 steps. Default: False.
        gray_noise (bool): Use a gray (single-channel) noise map. Default: False.

    Returns:
        ndarray: Noisy image, shape (h, w, c), range [0, 1], float32.
    """
    out = img + generate_gaussian_noise(img, sigma, gray_noise)
    if clip and rounds:
        return np.clip((out * 255.0).round(), 0, 255) / 255.
    if clip:
        return np.clip(out, 0, 1)
    if rounds:
        return (out * 255.0).round() / 255.
    return out
Add Gaussian noise. Args: img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32. sigma (float): Noise scale (measured in range 255). Default: 10. Returns: (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1], float32.
2,600
import math
import random

import numpy as np
import torch
from scipy import special
from scipy.stats import multivariate_normal


def generate_gaussian_noise_pt(img, sigma=10, gray_noise=0):
    """Sample Gaussian noise for a batch of images (PyTorch version).

    Args:
        img (Tensor): Shape (b, c, h, w), range [0, 1], float32.
        sigma (float | Tensor): Noise sigma on the [0, 255] scale; a Tensor
            provides a per-sample sigma of shape (b,). Default: 10.
        gray_noise (float | Tensor): Scalar or per-sample (b,) weight in
            [0, 1] blending a gray noise map into the colour noise. Default: 0.

    Returns:
        Tensor: Noise of shape (b, c, h, w) (not added to the image).
    """
    batch, _, height, width = img.size()
    if not isinstance(sigma, (float, int)):
        sigma = sigma.view(img.size(0), 1, 1, 1)
    if isinstance(gray_noise, (float, int)):
        use_gray = gray_noise > 0
    else:
        gray_noise = gray_noise.view(batch, 1, 1, 1)
        use_gray = torch.sum(gray_noise) > 0
    if use_gray:
        # Single-channel noise map, broadcast over the channel dimension.
        noise_gray = torch.randn(height, width, dtype=img.dtype, device=img.device) * sigma / 255.
        noise_gray = noise_gray.view(batch, 1, height, width)
    # Colour noise is always generated.
    noise = torch.randn(*img.size(), dtype=img.dtype, device=img.device) * sigma / 255.
    if use_gray:
        noise = noise * (1 - gray_noise) + noise_gray * gray_noise
    return noise


def add_gaussian_noise_pt(img, sigma=10, gray_noise=0, clip=True, rounds=False):
    """Add Gaussian noise to a batch of images (PyTorch version).

    Args:
        img (Tensor): Shape (b, c, h, w), range [0, 1], float32.
        sigma (float | Tensor): Noise sigma on the [0, 255] scale. Default: 10.
        gray_noise (float | Tensor): Gray-noise blend weight. Default: 0.
        clip (bool): Clamp the result to [0, 1]. Default: True.
        rounds (bool): Quantise the result to 1/255 steps. Default: False.

    Returns:
        Tensor: Noisy images, shape (b, c, h, w), range [0, 1], float32.
    """
    out = img + generate_gaussian_noise_pt(img, sigma, gray_noise)
    if clip and rounds:
        return torch.clamp((out * 255.0).round(), 0, 255) / 255.
    if clip:
        return torch.clamp(out, 0, 1)
    if rounds:
        return (out * 255.0).round() / 255.
    return out
Add Gaussian noise (PyTorch version). Args: img (Tensor): Shape (b, c, h, w), range[0, 1], float32. scale (float | Tensor): Noise scale. Default: 1.0. Returns: (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1], float32.
2,601
import cv2
import math
import random

import numpy as np
import torch
from scipy import special
from scipy.stats import multivariate_normal
from torchvision.transforms.functional_tensor import rgb_to_grayscale

np.seterr(divide='ignore', invalid='ignore')


def random_generate_gaussian_noise(img, sigma_range=(0, 10), gray_prob=0, return_sigma=False):
    """Sample sigma uniformly from ``sigma_range`` (gray noise with
    probability ``gray_prob``) and generate the matching Gaussian noise.

    Returns the noise map, plus the sampled sigma when ``return_sigma``.
    """
    sigma = np.random.uniform(sigma_range[0], sigma_range[1])
    gray_noise = np.random.uniform() < gray_prob
    noise = generate_gaussian_noise(img, sigma, gray_noise)
    if return_sigma:
        return noise, sigma
    return noise


def random_add_gaussian_noise(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False, return_sigma=False):
    """Add randomly-parameterised Gaussian noise to ``img``.

    Args:
        img (ndarray): Input image, (h, w, c), range [0, 1], float32.
        sigma_range (tuple): Uniform sampling range for sigma. Default: (0, 1.0).
        gray_prob (float): Probability of gray (single-channel) noise. Default: 0.
        clip (bool): Clip the result to [0, 1]. Default: True.
        rounds (bool): Quantise the result to 1/255 steps. Default: False.
        return_sigma (bool): Also return the sampled sigma. Default: False.

    Returns:
        ndarray | tuple[ndarray, float]: Noisy image (and sigma).
    """
    if return_sigma:
        noise, sigma = random_generate_gaussian_noise(img, sigma_range, gray_prob, return_sigma=True)
    else:
        noise = random_generate_gaussian_noise(img, sigma_range, gray_prob)
    out = img + noise
    if clip and rounds:
        out = np.clip((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = np.clip(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    if return_sigma:
        return out, sigma
    return out
null
2,602
import cv2
import math
import random

import numpy as np
import torch
from scipy import special
from scipy.stats import multivariate_normal
from torchvision.transforms.functional_tensor import rgb_to_grayscale


def random_generate_gaussian_noise_pt(img, sigma_range=(0, 10), gray_prob=0):
    """Per-sample random sigma plus a random gray mask, then Gaussian noise
    (PyTorch version). ``gray_prob`` is the per-sample probability of using
    gray noise.
    """
    batch = img.size(0)
    sigma = torch.rand(batch, dtype=img.dtype, device=img.device) \
        * (sigma_range[1] - sigma_range[0]) + sigma_range[0]
    gray_noise = (torch.rand(batch, dtype=img.dtype, device=img.device) < gray_prob).float()
    return generate_gaussian_noise_pt(img, sigma, gray_noise)


def random_add_gaussian_noise_pt(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
    """Add randomly-parameterised Gaussian noise to a batch (PyTorch version).

    Args:
        img (Tensor): Shape (b, c, h, w), range [0, 1], float32.
        sigma_range (tuple): Uniform sampling range for sigma. Default: (0, 1.0).
        gray_prob (float): Per-sample probability of gray noise. Default: 0.
        clip (bool): Clamp the result to [0, 1]. Default: True.
        rounds (bool): Quantise the result to 1/255 steps. Default: False.

    Returns:
        Tensor: Noisy images, shape (b, c, h, w), float32.
    """
    out = img + random_generate_gaussian_noise_pt(img, sigma_range, gray_prob)
    if clip and rounds:
        return torch.clamp((out * 255.0).round(), 0, 255) / 255.
    if clip:
        return torch.clamp(out, 0, 1)
    if rounds:
        return (out * 255.0).round() / 255.
    return out
null
2,603
import cv2
import math
import random

import numpy as np
import torch
from scipy import special
from scipy.stats import multivariate_normal
from torchvision.transforms.functional_tensor import rgb_to_grayscale

np.seterr(divide='ignore', invalid='ignore')


def generate_poisson_noise(img, scale=1.0, gray_noise=False):
    """Generate Poisson (shot) noise for ``img``.

    Reference:
    https://github.com/scikit-image/scikit-image/blob/main/skimage/util/noise.py#L37-L219

    Args:
        img (ndarray): Input image, shape (h, w, c), range [0, 1], float32.
        scale (float): Noise scale. Default: 1.0.
        gray_noise (bool): Generate single-channel noise and repeat it over
            3 channels. Default: False.

    Returns:
        ndarray: Noise map scaled by ``scale`` (not added to the image).
    """
    if gray_noise:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Quantise to 1/255 steps so the unique-value count is meaningful.
    img = np.clip((img * 255.0).round(), 0, 255) / 255.
    vals = 2 ** np.ceil(np.log2(len(np.unique(img))))
    sampled = np.float32(np.random.poisson(img * vals) / float(vals))
    noise = sampled - img
    if gray_noise:
        noise = np.repeat(noise[:, :, np.newaxis], 3, axis=2)
    return noise * scale


def add_poisson_noise(img, scale=1.0, clip=True, rounds=False, gray_noise=False):
    """Add Poisson noise to ``img``.

    Args:
        img (ndarray): Input image, shape (h, w, c), range [0, 1], float32.
        scale (float): Noise scale. Default: 1.0.
        clip (bool): Clip the result to [0, 1]. Default: True.
        rounds (bool): Quantise the result to 1/255 steps. Default: False.
        gray_noise (bool): Use single-channel noise. Default: False.

    Returns:
        ndarray: Noisy image, shape (h, w, c), range [0, 1], float32.
    """
    out = img + generate_poisson_noise(img, scale, gray_noise)
    if clip and rounds:
        return np.clip((out * 255.0).round(), 0, 255) / 255.
    if clip:
        return np.clip(out, 0, 1)
    if rounds:
        return (out * 255.0).round() / 255.
    return out
Add poisson noise. Args: img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32. scale (float): Noise scale. Default: 1.0. gray_noise (bool): Whether generate gray noise. Default: False. Returns: (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1], float32.
2,604
import cv2
import math
import random

import numpy as np
import torch
from scipy import special
from scipy.stats import multivariate_normal

# `torchvision.transforms.functional_tensor` was removed in torchvision>=0.17;
# prefer the public module and fall back for older versions.
try:
    from torchvision.transforms.functional import rgb_to_grayscale
except ImportError:
    from torchvision.transforms.functional_tensor import rgb_to_grayscale


def generate_poisson_noise_pt(img, scale=1.0, gray_noise=0):
    """Generate a batch of Poisson noise (PyTorch version).

    Args:
        img (Tensor): Input images, shape (b, c, h, w), range [0, 1], float32.
        scale (float | Tensor): Noise scale; scalar or per-sample Tensor of
            shape (b). Default: 1.0.
        gray_noise (float | Tensor): Scalar or per-sample (b,) weight in
            [0, 1] blending gray noise into the colour noise. Default: 0.

    Returns:
        Tensor: Noise of shape (b, c, h, w) (not added to the image).
    """
    b, _, h, w = img.size()
    if isinstance(gray_noise, (float, int)):
        cal_gray_noise = gray_noise > 0
    else:
        gray_noise = gray_noise.view(b, 1, 1, 1)
        cal_gray_noise = torch.sum(gray_noise) > 0
    if cal_gray_noise:
        img_gray = rgb_to_grayscale(img, num_output_channels=1)
        # round and clip image for counting vals correctly
        img_gray = torch.clamp((img_gray * 255.0).round(), 0, 255) / 255.
        # use a for-loop to get the unique values for each sample
        vals_list = [len(torch.unique(img_gray[i, :, :, :])) for i in range(b)]
        vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
        vals = img_gray.new_tensor(vals_list).view(b, 1, 1, 1)
        out = torch.poisson(img_gray * vals) / vals
        noise_gray = (out - img_gray).expand(b, 3, h, w)

    # always calculate colour noise
    # round and clip image for counting vals correctly
    img = torch.clamp((img * 255.0).round(), 0, 255) / 255.
    vals_list = [len(torch.unique(img[i, :, :, :])) for i in range(b)]
    vals_list = [2**np.ceil(np.log2(vals)) for vals in vals_list]
    vals = img.new_tensor(vals_list).view(b, 1, 1, 1)
    out = torch.poisson(img * vals) / vals
    noise = out - img
    if cal_gray_noise:
        noise = noise * (1 - gray_noise) + noise_gray * gray_noise
    if not isinstance(scale, (float, int)):
        scale = scale.view(b, 1, 1, 1)
    return noise * scale


def add_poisson_noise_pt(img, scale=1.0, clip=True, rounds=False, gray_noise=0):
    """Add Poisson noise to a batch of images (PyTorch version).

    Args:
        img (Tensor): Input images, shape (b, c, h, w), range [0, 1], float32.
        scale (float | Tensor): Noise scale; scalar or per-sample (b). Default: 1.0.
        clip (bool): Clamp the result to [0, 1]. Default: True.
        rounds (bool): Quantise the result to 1/255 steps. Default: False.
        gray_noise (float | Tensor): Gray-noise blend weight. Default: 0.

    Returns:
        Tensor: Noisy images, shape (b, c, h, w), range [0, 1], float32.
    """
    noise = generate_poisson_noise_pt(img, scale, gray_noise)
    out = img + noise
    if clip and rounds:
        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = torch.clamp(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out
Add poisson noise to a batch of images (PyTorch version). Args: img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32. scale (float | Tensor): Noise scale. Number or Tensor with shape (b). Default: 1.0. gray_noise (float | Tensor): 0-1 number or Tensor with shape (b). 0 for False, 1 for True. Default: 0. Returns: (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1], float32.
2,605
import cv2 import math import numpy as np import random import torch from scipy import special from scipy.stats import multivariate_normal from torchvision.transforms.functional_tensor import rgb_to_grayscale np.seterr(divide='ignore', invalid='ignore') def random_generate_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0): def random_add_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False): noise = random_generate_poisson_noise(img, scale_range, gray_prob) out = img + noise if clip and rounds: out = np.clip((out * 255.0).round(), 0, 255) / 255. elif clip: out = np.clip(out, 0, 1) elif rounds: out = (out * 255.0).round() / 255. return out
null
2,606
import cv2 import math import numpy as np import random import torch from scipy import special from scipy.stats import multivariate_normal from torchvision.transforms.functional_tensor import rgb_to_grayscale def random_generate_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0): scale = torch.rand( img.size(0), dtype=img.dtype, device=img.device) * (scale_range[1] - scale_range[0]) + scale_range[0] gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device) gray_noise = (gray_noise < gray_prob).float() return generate_poisson_noise_pt(img, scale, gray_noise) def random_add_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False): noise = random_generate_poisson_noise_pt(img, scale_range, gray_prob) out = img + noise if clip and rounds: out = torch.clamp((out * 255.0).round(), 0, 255) / 255. elif clip: out = torch.clamp(out, 0, 1) elif rounds: out = (out * 255.0).round() / 255. return out
null
2,607
import cv2 import math import numpy as np import random import torch from scipy import special from scipy.stats import multivariate_normal from torchvision.transforms.functional_tensor import rgb_to_grayscale np.seterr(divide='ignore', invalid='ignore') def random_add_speckle_noise(imgs, speckle_std): std_range = speckle_std std_l = std_range[0] std_r = std_range[1] mean=0 std=random.uniform(std_l/255.,std_r/255.) outputs = [] for img in imgs: gauss=np.random.normal(loc=mean,scale=std,size=img.shape) noisy=img+gauss*img noisy=np.clip(noisy,0,1).astype(np.float32) outputs.append(noisy) return outputs
null
2,608
import cv2 import math import numpy as np import random import torch from scipy import special from scipy.stats import multivariate_normal from torchvision.transforms.functional_tensor import rgb_to_grayscale def random_add_speckle_noise_pt(img, speckle_std): std_range = speckle_std std_l = std_range[0] std_r = std_range[1] mean=0 std=random.uniform(std_l/255.,std_r/255.) gauss=torch.normal(mean=mean,std=std,size=img.size()).to(img.device) noisy=img+gauss*img noisy=torch.clamp(noisy,0,1) return noisy
null
2,609
import cv2 import math import numpy as np import random import torch from scipy import special from scipy.stats import multivariate_normal from torchvision.transforms.functional_tensor import rgb_to_grayscale np.seterr(divide='ignore', invalid='ignore') def random_add_saltpepper_noise(imgs, saltpepper_amount, saltpepper_svsp): p_range = saltpepper_amount p = random.uniform(p_range[0], p_range[1]) q_range = saltpepper_svsp q = random.uniform(q_range[0], q_range[1]) outputs = [] for img in imgs: out = img.copy() flipped = np.random.choice([True, False], size=img.shape, p=[p, 1 - p]) salted = np.random.choice([True, False], size=img.shape, p=[q, 1 - q]) peppered = ~salted out[flipped & salted] = 1 out[flipped & peppered] = 0. noisy = np.clip(out, 0, 1).astype(np.float32) outputs.append(noisy) return outputs
null
2,610
import cv2 import math import numpy as np import random import torch from scipy import special from scipy.stats import multivariate_normal from torchvision.transforms.functional_tensor import rgb_to_grayscale np.seterr(divide='ignore', invalid='ignore') def random_add_saltpepper_noise_pt(imgs, saltpepper_amount, saltpepper_svsp): p_range = saltpepper_amount p = random.uniform(p_range[0], p_range[1]) q_range = saltpepper_svsp q = random.uniform(q_range[0], q_range[1]) imgs = imgs.permute(0,2,3,1) outputs = [] for i in range(imgs.size(0)): img = imgs[i] out = img.clone() flipped = np.random.choice([True, False], size=img.shape, p=[p, 1 - p]) salted = np.random.choice([True, False], size=img.shape, p=[q, 1 - q]) peppered = ~salted temp = flipped & salted out[flipped & salted] = 1 out[flipped & peppered] = 0. noisy = torch.clamp(out, 0, 1) outputs.append(noisy.permute(2,0,1)) if len(outputs)>1: return torch.cat(outputs, dim=0) else: return outputs[0].unsqueeze(0)
null
2,611
import cv2 import math import numpy as np import random import torch from scipy import special from scipy.stats import multivariate_normal from torchvision.transforms.functional_tensor import rgb_to_grayscale np.seterr(divide='ignore', invalid='ignore') def random_add_screen_noise(imgs, linewidth, space): #screen_noise = np.random.uniform() < self.params['noise_prob'][0] linewidth = linewidth linewidth = int(np.random.uniform(linewidth[0], linewidth[1])) space = space space = int(np.random.uniform(space[0], space[1])) center_color = [213,230,230] # RGB outputs = [] for img in imgs: noise = img.copy() tmp_mask = np.zeros((img.shape[1], img.shape[0]), dtype=np.float32) for i in range(0, img.shape[0], int((space+linewidth))): tmp_mask[:, i:(i+linewidth)] = 1 colour_masks = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.float32) colour_masks[:,:,0] = (center_color[0] + np.random.uniform(-20, 20))/255. colour_masks[:,:,1] = (center_color[1] + np.random.uniform(0, 20))/255. colour_masks[:,:,2] = (center_color[2] + np.random.uniform(0, 20))/255. noise_color = cv2.addWeighted(noise, 0.6, colour_masks, 0.4, 0.0) noise = noise*(1-(tmp_mask[:,:,np.newaxis])) + noise_color*(tmp_mask[:,:,np.newaxis]) outputs.append(noise) return outputs
null
2,612
import cv2 import math import numpy as np import random import torch from scipy import special from scipy.stats import multivariate_normal from torchvision.transforms.functional_tensor import rgb_to_grayscale np.seterr(divide='ignore', invalid='ignore') def add_jpg_compression(img, quality=90): """Add JPG compression artifacts. Args: img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32. quality (float): JPG compression quality. 0 for lowest quality, 100 for best quality. Default: 90. Returns: (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1], float32. """ img = np.clip(img, 0, 1) encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), int(quality)] _, encimg = cv2.imencode('.jpg', img * 255., encode_param) img = np.float32(cv2.imdecode(encimg, 1)) / 255. return img The provided code snippet includes necessary dependencies for implementing the `random_add_jpg_compression` function. Write a Python function `def random_add_jpg_compression(img, quality_range=(90, 100), return_q=False)` to solve the following problem: Randomly add JPG compression artifacts. Args: img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32. quality_range (tuple[float] | list[float]): JPG compression quality range. 0 for lowest quality, 100 for best quality. Default: (90, 100). Returns: (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1], float32. Here is the function: def random_add_jpg_compression(img, quality_range=(90, 100), return_q=False): """Randomly add JPG compression artifacts. Args: img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32. quality_range (tuple[float] | list[float]): JPG compression quality range. 0 for lowest quality, 100 for best quality. Default: (90, 100). Returns: (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1], float32. 
""" quality = np.random.uniform(quality_range[0], quality_range[1]) if return_q: return add_jpg_compression(img, quality), quality else: return add_jpg_compression(img, quality)
Randomly add JPG compression artifacts. Args: img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32. quality_range (tuple[float] | list[float]): JPG compression quality range. 0 for lowest quality, 100 for best quality. Default: (90, 100). Returns: (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1], float32.
2,613
import cv2 import numpy as np import torch from os import path as osp from torch.nn import functional as F from basicsr.data.transforms import mod_crop from basicsr.utils import img2tensor, scandir def mod_crop(img, scale): """Mod crop images, used during testing. Args: img (ndarray): Input image. scale (int): Scale factor. Returns: ndarray: Result image. """ img = img.copy() if img.ndim in (2, 3): h, w = img.shape[0], img.shape[1] h_remainder, w_remainder = h % scale, w % scale img = img[:h - h_remainder, :w - w_remainder, ...] else: raise ValueError(f'Wrong img ndim: {img.ndim}.') return img The provided code snippet includes necessary dependencies for implementing the `read_img_seq` function. Write a Python function `def read_img_seq(path, require_mod_crop=False, scale=1, return_imgname=False)` to solve the following problem: Read a sequence of images from a given folder path. Args: path (list[str] | str): List of image paths or image folder path. require_mod_crop (bool): Require mod crop for each image. Default: False. scale (int): Scale factor for mod_crop. Default: 1. return_imgname(bool): Whether return image names. Default False. Returns: Tensor: size (t, c, h, w), RGB, [0, 1]. list[str]: Returned image name list. Here is the function: def read_img_seq(path, require_mod_crop=False, scale=1, return_imgname=False): """Read a sequence of images from a given folder path. Args: path (list[str] | str): List of image paths or image folder path. require_mod_crop (bool): Require mod crop for each image. Default: False. scale (int): Scale factor for mod_crop. Default: 1. return_imgname(bool): Whether return image names. Default False. Returns: Tensor: size (t, c, h, w), RGB, [0, 1]. list[str]: Returned image name list. """ if isinstance(path, list): img_paths = path else: img_paths = sorted(list(scandir(path, full_path=True))) imgs = [cv2.imread(v).astype(np.float32) / 255. 
for v in img_paths] if require_mod_crop: imgs = [mod_crop(img, scale) for img in imgs] imgs = img2tensor(imgs, bgr2rgb=True, float32=True) imgs = torch.stack(imgs, dim=0) if return_imgname: imgnames = [osp.splitext(osp.basename(path))[0] for path in img_paths] return imgs, imgnames else: return imgs
Read a sequence of images from a given folder path. Args: path (list[str] | str): List of image paths or image folder path. require_mod_crop (bool): Require mod crop for each image. Default: False. scale (int): Scale factor for mod_crop. Default: 1. return_imgname(bool): Whether return image names. Default False. Returns: Tensor: size (t, c, h, w), RGB, [0, 1]. list[str]: Returned image name list.
2,614
import cv2 import numpy as np import torch from os import path as osp from torch.nn import functional as F from basicsr.data.transforms import mod_crop from basicsr.utils import img2tensor, scandir The provided code snippet includes necessary dependencies for implementing the `generate_frame_indices` function. Write a Python function `def generate_frame_indices(crt_idx, max_frame_num, num_frames, padding='reflection')` to solve the following problem: Generate an index list for reading `num_frames` frames from a sequence of images. Args: crt_idx (int): Current center index. max_frame_num (int): Max number of the sequence of images (from 1). num_frames (int): Reading num_frames frames. padding (str): Padding mode, one of 'replicate' | 'reflection' | 'reflection_circle' | 'circle' Examples: current_idx = 0, num_frames = 5 The generated frame indices under different padding mode: replicate: [0, 0, 0, 1, 2] reflection: [2, 1, 0, 1, 2] reflection_circle: [4, 3, 0, 1, 2] circle: [3, 4, 0, 1, 2] Returns: list[int]: A list of indices. Here is the function: def generate_frame_indices(crt_idx, max_frame_num, num_frames, padding='reflection'): """Generate an index list for reading `num_frames` frames from a sequence of images. Args: crt_idx (int): Current center index. max_frame_num (int): Max number of the sequence of images (from 1). num_frames (int): Reading num_frames frames. padding (str): Padding mode, one of 'replicate' | 'reflection' | 'reflection_circle' | 'circle' Examples: current_idx = 0, num_frames = 5 The generated frame indices under different padding mode: replicate: [0, 0, 0, 1, 2] reflection: [2, 1, 0, 1, 2] reflection_circle: [4, 3, 0, 1, 2] circle: [3, 4, 0, 1, 2] Returns: list[int]: A list of indices. """ assert num_frames % 2 == 1, 'num_frames should be an odd number.' assert padding in ('replicate', 'reflection', 'reflection_circle', 'circle'), f'Wrong padding mode: {padding}.' 
max_frame_num = max_frame_num - 1 # start from 0 num_pad = num_frames // 2 indices = [] for i in range(crt_idx - num_pad, crt_idx + num_pad + 1): if i < 0: if padding == 'replicate': pad_idx = 0 elif padding == 'reflection': pad_idx = -i elif padding == 'reflection_circle': pad_idx = crt_idx + num_pad - i else: pad_idx = num_frames + i elif i > max_frame_num: if padding == 'replicate': pad_idx = max_frame_num elif padding == 'reflection': pad_idx = max_frame_num * 2 - i elif padding == 'reflection_circle': pad_idx = (crt_idx - num_pad) - (i - max_frame_num) else: pad_idx = i - num_frames else: pad_idx = i indices.append(pad_idx) return indices
Generate an index list for reading `num_frames` frames from a sequence of images. Args: crt_idx (int): Current center index. max_frame_num (int): Max number of the sequence of images (from 1). num_frames (int): Reading num_frames frames. padding (str): Padding mode, one of 'replicate' | 'reflection' | 'reflection_circle' | 'circle' Examples: current_idx = 0, num_frames = 5 The generated frame indices under different padding mode: replicate: [0, 0, 0, 1, 2] reflection: [2, 1, 0, 1, 2] reflection_circle: [4, 3, 0, 1, 2] circle: [3, 4, 0, 1, 2] Returns: list[int]: A list of indices.
2,615
import cv2 import numpy as np import torch from os import path as osp from torch.nn import functional as F from basicsr.data.transforms import mod_crop from basicsr.utils import img2tensor, scandir The provided code snippet includes necessary dependencies for implementing the `paired_paths_from_lmdb` function. Write a Python function `def paired_paths_from_lmdb(folders, keys)` to solve the following problem: Generate paired paths from lmdb files. Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is: :: lq.lmdb ├── data.mdb ├── lock.mdb ├── meta_info.txt The data.mdb and lock.mdb are standard lmdb files and you can refer to https://lmdb.readthedocs.io/en/release/ for more details. The meta_info.txt is a specified txt file to record the meta information of our datasets. It will be automatically created when preparing datasets by our provided dataset tools. Each line in the txt file records 1)image name (with extension), 2)image shape, 3)compression level, separated by a white space. Example: `baboon.png (120,125,3) 1` We use the image name without extension as the lmdb key. Note that we use the same key for the corresponding lq and gt images. Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. Note that this key is different from lmdb keys. Returns: list[str]: Returned path list. Here is the function: def paired_paths_from_lmdb(folders, keys): """Generate paired paths from lmdb files. Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is: :: lq.lmdb ├── data.mdb ├── lock.mdb ├── meta_info.txt The data.mdb and lock.mdb are standard lmdb files and you can refer to https://lmdb.readthedocs.io/en/release/ for more details. The meta_info.txt is a specified txt file to record the meta information of our datasets. 
It will be automatically created when preparing datasets by our provided dataset tools. Each line in the txt file records 1)image name (with extension), 2)image shape, 3)compression level, separated by a white space. Example: `baboon.png (120,125,3) 1` We use the image name without extension as the lmdb key. Note that we use the same key for the corresponding lq and gt images. Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. Note that this key is different from lmdb keys. Returns: list[str]: Returned path list. """ assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. ' f'But got {len(folders)}') assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}' input_folder, gt_folder = folders input_key, gt_key = keys if not (input_folder.endswith('.lmdb') and gt_folder.endswith('.lmdb')): raise ValueError(f'{input_key} folder and {gt_key} folder should both in lmdb ' f'formats. But received {input_key}: {input_folder}; ' f'{gt_key}: {gt_folder}') # ensure that the two meta_info files are the same with open(osp.join(input_folder, 'meta_info.txt')) as fin: input_lmdb_keys = [line.split('.')[0] for line in fin] with open(osp.join(gt_folder, 'meta_info.txt')) as fin: gt_lmdb_keys = [line.split('.')[0] for line in fin] if set(input_lmdb_keys) != set(gt_lmdb_keys): raise ValueError(f'Keys in {input_key}_folder and {gt_key}_folder are different.') else: paths = [] for lmdb_key in sorted(input_lmdb_keys): paths.append(dict([(f'{input_key}_path', lmdb_key), (f'{gt_key}_path', lmdb_key)])) return paths
Generate paired paths from lmdb files. Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is: :: lq.lmdb ├── data.mdb ├── lock.mdb ├── meta_info.txt The data.mdb and lock.mdb are standard lmdb files and you can refer to https://lmdb.readthedocs.io/en/release/ for more details. The meta_info.txt is a specified txt file to record the meta information of our datasets. It will be automatically created when preparing datasets by our provided dataset tools. Each line in the txt file records 1)image name (with extension), 2)image shape, 3)compression level, separated by a white space. Example: `baboon.png (120,125,3) 1` We use the image name without extension as the lmdb key. Note that we use the same key for the corresponding lq and gt images. Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. Note that this key is different from lmdb keys. Returns: list[str]: Returned path list.
2,616
import cv2 import numpy as np import torch from os import path as osp from torch.nn import functional as F from basicsr.data.transforms import mod_crop from basicsr.utils import img2tensor, scandir The provided code snippet includes necessary dependencies for implementing the `paired_paths_from_meta_info_file` function. Write a Python function `def paired_paths_from_meta_info_file(folders, keys, meta_info_file, filename_tmpl)` to solve the following problem: Generate paired paths from an meta information file. Each line in the meta information file contains the image names and image shape (usually for gt), separated by a white space. Example of an meta information file: ``` 0001_s001.png (480,480,3) 0001_s002.png (480,480,3) ``` Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. meta_info_file (str): Path to the meta information file. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list. Here is the function: def paired_paths_from_meta_info_file(folders, keys, meta_info_file, filename_tmpl): """Generate paired paths from an meta information file. Each line in the meta information file contains the image names and image shape (usually for gt), separated by a white space. Example of an meta information file: ``` 0001_s001.png (480,480,3) 0001_s002.png (480,480,3) ``` Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. meta_info_file (str): Path to the meta information file. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. 
Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list. """ assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. ' f'But got {len(folders)}') assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}' input_folder, gt_folder = folders input_key, gt_key = keys with open(meta_info_file, 'r') as fin: gt_names = [line.strip().split(' ')[0] for line in fin] paths = [] for gt_name in gt_names: basename, ext = osp.splitext(osp.basename(gt_name)) input_name = f'{filename_tmpl.format(basename)}{ext}' input_path = osp.join(input_folder, input_name) gt_path = osp.join(gt_folder, gt_name) paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)])) return paths
Generate paired paths from an meta information file. Each line in the meta information file contains the image names and image shape (usually for gt), separated by a white space. Example of an meta information file: ``` 0001_s001.png (480,480,3) 0001_s002.png (480,480,3) ``` Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. meta_info_file (str): Path to the meta information file. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list.
2,617
import cv2 import numpy as np import torch from os import path as osp from torch.nn import functional as F from basicsr.data.transforms import mod_crop from basicsr.utils import img2tensor, scandir The provided code snippet includes necessary dependencies for implementing the `paired_paths_from_meta_info_file_2` function. Write a Python function `def paired_paths_from_meta_info_file_2(folders, keys, meta_info_file, filename_tmpl)` to solve the following problem: Generate paired paths from an meta information file. Each line in the meta information file contains the image names and image shape (usually for gt), separated by a white space. Example of an meta information file: ``` 0001_s001.png (480,480,3) 0001_s002.png (480,480,3) ``` Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. meta_info_file (str): Path to the meta information file. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list. Here is the function: def paired_paths_from_meta_info_file_2(folders, keys, meta_info_file, filename_tmpl): """Generate paired paths from an meta information file. Each line in the meta information file contains the image names and image shape (usually for gt), separated by a white space. Example of an meta information file: ``` 0001_s001.png (480,480,3) 0001_s002.png (480,480,3) ``` Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. meta_info_file (str): Path to the meta information file. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. 
Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list. """ assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. ' f'But got {len(folders)}') assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. But got {len(keys)}' input_folder, gt_folder = folders input_key, gt_key = keys with open(meta_info_file, 'r') as fin: gt_names = [line.strip().split(' ')[0] for line in fin] with open(meta_info_file, 'r') as fin: input_names = [line.strip().split(' ')[1] for line in fin] paths = [] for i in range(len(gt_names)): gt_name = gt_names[i] lq_name = input_names[i] basename, ext = osp.splitext(osp.basename(gt_name)) basename = gt_name[:-len(ext)] gt_path = osp.join(gt_folder, gt_name) basename, ext = osp.splitext(osp.basename(lq_name)) basename = lq_name[:-len(ext)] input_path = osp.join(input_folder, lq_name) paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)])) return paths
Generate paired paths from an meta information file. Each line in the meta information file contains the image names and image shape (usually for gt), separated by a white space. Example of an meta information file: ``` 0001_s001.png (480,480,3) 0001_s002.png (480,480,3) ``` Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. meta_info_file (str): Path to the meta information file. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list.
2,618
import cv2 import numpy as np import torch from os import path as osp from torch.nn import functional as F from basicsr.data.transforms import mod_crop from basicsr.utils import img2tensor, scandir The provided code snippet includes necessary dependencies for implementing the `paired_paths_from_folder` function. Write a Python function `def paired_paths_from_folder(folders, keys, filename_tmpl)` to solve the following problem: Generate paired paths from folders. Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list. Here is the function: def paired_paths_from_folder(folders, keys, filename_tmpl): """Generate paired paths from folders. Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list. """ assert len(folders) == 2, ('The len of folders should be 2 with [input_folder, gt_folder]. ' f'But got {len(folders)}') assert len(keys) == 2, f'The len of keys should be 2 with [input_key, gt_key]. 
But got {len(keys)}' input_folder, gt_folder = folders input_key, gt_key = keys input_paths = list(scandir(input_folder)) gt_paths = list(scandir(gt_folder)) assert len(input_paths) == len(gt_paths), (f'{input_key} and {gt_key} datasets have different number of images: ' f'{len(input_paths)}, {len(gt_paths)}.') paths = [] for gt_path in gt_paths: basename, ext = osp.splitext(osp.basename(gt_path)) input_name = f'{filename_tmpl.format(basename)}{ext}' input_path = osp.join(input_folder, input_name) assert input_name in input_paths, f'{input_name} is not in {input_key}_paths.' gt_path = osp.join(gt_folder, gt_path) paths.append(dict([(f'{input_key}_path', input_path), (f'{gt_key}_path', gt_path)])) return paths
Generate paired paths from folders. Args: folders (list[str]): A list of folder path. The order of list should be [input_folder, gt_folder]. keys (list[str]): A list of keys identifying folders. The order should be in consistent with folders, e.g., ['lq', 'gt']. filename_tmpl (str): Template for each filename. Note that the template excludes the file extension. Usually the filename_tmpl is for files in the input folder. Returns: list[str]: Returned path list.
2,619
import cv2 import numpy as np import torch from os import path as osp from torch.nn import functional as F from basicsr.data.transforms import mod_crop from basicsr.utils import img2tensor, scandir The provided code snippet includes necessary dependencies for implementing the `paths_from_folder` function. Write a Python function `def paths_from_folder(folder)` to solve the following problem: Generate paths from folder. Args: folder (str): Folder path. Returns: list[str]: Returned path list. Here is the function: def paths_from_folder(folder): """Generate paths from folder. Args: folder (str): Folder path. Returns: list[str]: Returned path list. """ paths = list(scandir(folder)) paths = [osp.join(folder, path) for path in paths] return paths
Generate paths from folder. Args: folder (str): Folder path. Returns: list[str]: Returned path list.
2,620
import cv2 import numpy as np import torch from os import path as osp from torch.nn import functional as F from basicsr.data.transforms import mod_crop from basicsr.utils import img2tensor, scandir The provided code snippet includes necessary dependencies for implementing the `paths_from_lmdb` function. Write a Python function `def paths_from_lmdb(folder)` to solve the following problem: Generate paths from lmdb. Args: folder (str): Folder path. Returns: list[str]: Returned path list. Here is the function: def paths_from_lmdb(folder): """Generate paths from lmdb. Args: folder (str): Folder path. Returns: list[str]: Returned path list. """ if not folder.endswith('.lmdb'): raise ValueError(f'Folder {folder}folder should in lmdb format.') with open(osp.join(folder, 'meta_info.txt')) as fin: paths = [line.split('.')[0] for line in fin] return paths
Generate paths from lmdb. Args: folder (str): Folder path. Returns: list[str]: Returned path list.
2,621
def generate_gaussian_kernel(kernel_size=13, sigma=1.6):
    """Generate Gaussian kernel used in `duf_downsample`.

    Args:
        kernel_size (int): Kernel size. Default: 13.
        sigma (float): Sigma of the Gaussian kernel. Default: 1.6.

    Returns:
        np.array: The Gaussian kernel.
    """
    # Fix: `scipy.ndimage.filters` is a long-deprecated alias namespace that has
    # been removed in recent SciPy; import gaussian_filter from scipy.ndimage.
    from scipy.ndimage import gaussian_filter
    kernel = np.zeros((kernel_size, kernel_size))
    # set element at the middle to one, a dirac delta
    kernel[kernel_size // 2, kernel_size // 2] = 1
    # gaussian-smooth the dirac, resulting in a gaussian filter
    return gaussian_filter(kernel, sigma)


def duf_downsample(x, kernel_size=13, scale=4):
    """Downsampling with Gaussian kernel used in the DUF official code.

    Args:
        x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w)
            or (t, c, h, w).
        kernel_size (int): Kernel size. Default: 13.
        scale (int): Downsampling factor. Supported scale: (2, 3, 4).
            Default: 4.

    Returns:
        Tensor: DUF downsampled frames.
    """
    assert scale in (2, 3, 4), f'Only support scale (2, 3, 4), but got {scale}.'

    squeeze_flag = False
    if x.ndim == 4:
        # Accept a 4D (t, c, h, w) input; add a batch dim and remove it at the end.
        squeeze_flag = True
        x = x.unsqueeze(0)
    b, t, c, h, w = x.size()
    x = x.view(-1, 1, h, w)
    # Pad enough so the strided convolution plus the [2:-2] crop below is valid.
    pad_w, pad_h = kernel_size // 2 + scale * 2, kernel_size // 2 + scale * 2
    x = F.pad(x, (pad_w, pad_w, pad_h, pad_h), 'reflect')

    gaussian_filter = generate_gaussian_kernel(kernel_size, 0.4 * scale)
    gaussian_filter = torch.from_numpy(gaussian_filter).type_as(x).unsqueeze(0).unsqueeze(0)
    x = F.conv2d(x, gaussian_filter, stride=scale)
    # Crop the border introduced by the extra `scale * 2` padding above.
    x = x[:, :, 2:-2, 2:-2]
    x = x.view(b, t, c, x.size(2), x.size(3))
    if squeeze_flag:
        x = x.squeeze(0)
    return x
Downsampling with Gaussian kernel used in the DUF official code. Args: x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w). kernel_size (int): Kernel size. Default: 13. scale (int): Downsampling factor. Supported scale: (2, 3, 4). Default: 4. Returns: Tensor: DUF downsampled frames.
2,622
def make_resample_kernel(k):
    """Make resampling kernel for UpFirDn.

    Args:
        k (list[int]): A list indicating the 1D resample kernel magnitude.

    Returns:
        Tensor: 2D resampled kernel.
    """
    kernel = torch.tensor(k, dtype=torch.float32)
    # Expand a 1D magnitude vector into its symmetric 2D outer product.
    if kernel.ndim == 1:
        kernel = torch.outer(kernel, kernel)
    # Normalize so the kernel sums to one.
    return kernel / kernel.sum()
Make resampling kernel for UpFirDn. Args: k (list[int]): A list indicating the 1D resample kernel magnitude. Returns: Tensor: 2D resampled kernel.
2,623
def insert_bn(names):
    """Insert bn layer after each conv.

    Args:
        names (list): The list of layer names.

    Returns:
        list: The list of layer names with bn layers.
    """
    result = []
    for layer_name in names:
        if 'conv' in layer_name:
            # A conv layer 'convX_Y' gets a matching 'bnX_Y' right after it.
            result.extend([layer_name, 'bn' + layer_name.replace('conv', '')])
        else:
            result.append(layer_name)
    return result
Insert bn layer after each conv. Args: names (list): The list of layer names. Returns: list: The list of layer names with bn layers.
2,624
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.model_zoo import load_url
from torchvision import models

# Weights of the FID-specific Inception network (ported from the official
# TensorFlow model); the local path is checked before downloading.
FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth'
LOCAL_FID_WEIGHTS = 'experiments/pretrained_models/pt_inception-2015-12-05-6726825d.pth'


class FIDInceptionA(models.inception.InceptionA):
    """InceptionA block patched for FID computation"""

    def __init__(self, in_channels, pool_features):
        super(FIDInceptionA, self).__init__(in_channels, pool_features)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        # Patch: Tensorflow's average pool does not use the padded zero's in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionC(models.inception.InceptionC):
    """InceptionC block patched for FID computation"""

    def __init__(self, in_channels, channels_7x7):
        super(FIDInceptionC, self).__init__(in_channels, channels_7x7)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        # Patch: Tensorflow's average pool does not use the padded zero's in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionE_1(models.inception.InceptionE):
    """First InceptionE block patched for FID computation"""

    def __init__(self, in_channels):
        super(FIDInceptionE_1, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        # Patch: Tensorflow's average pool does not use the padded zero's in
        # its average calculation
        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1, count_include_pad=False)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class FIDInceptionE_2(models.inception.InceptionE):
    """Second InceptionE block patched for FID computation"""

    def __init__(self, in_channels):
        super(FIDInceptionE_2, self).__init__(in_channels)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        # Patch: The FID Inception model uses max pooling instead of average
        # pooling. This is likely an error in this specific Inception
        # implementation, as other Inception models use average pooling here
        # (which matches the description in the paper).
        branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


def fid_inception_v3():
    """Build pretrained Inception model for FID computation.

    The Inception model for FID computation uses a different set of weights
    and has a slightly different structure than torchvision's Inception.

    This method first constructs torchvision's Inception and then patches the
    necessary parts that are different in the FID Inception model.
    """
    try:
        inception = models.inception_v3(num_classes=1008, aux_logits=False, pretrained=False, init_weights=False)
    except TypeError:
        # pytorch < 1.5 does not have init_weights for inception_v3
        inception = models.inception_v3(num_classes=1008, aux_logits=False, pretrained=False)
    # Swap in the patched blocks so the loaded FID weights match the structure.
    inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
    inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
    inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
    inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
    inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
    inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
    inception.Mixed_7b = FIDInceptionE_1(1280)
    inception.Mixed_7c = FIDInceptionE_2(2048)
    # Prefer a local weights file; fall back to downloading from the release URL.
    if os.path.exists(LOCAL_FID_WEIGHTS):
        state_dict = torch.load(LOCAL_FID_WEIGHTS, map_location=lambda storage, loc: storage)
    else:
        state_dict = load_url(FID_WEIGHTS_URL, progress=True)

    inception.load_state_dict(state_dict)
    return inception
Build pretrained Inception model for FID computation. The Inception model for FID computation uses a different set of weights and has a slightly different structure than torchvision's Inception. This method first constructs torchvision's Inception and then patches the necessary parts that are different in the FID Inception model.
2,625
def lip2d(x, logit, kernel=3, stride=2, padding=1):
    """Weighted average pooling where per-pixel weights are ``exp(logit)``.

    Equivalent to avg_pool(x * w) / avg_pool(w) with w = exp(logit), so pixels
    with larger logits contribute more to each pooled output.
    """
    weight = logit.exp()
    numerator = F.avg_pool2d(x * weight, kernel, stride, padding)
    denominator = F.avg_pool2d(weight, kernel, stride, padding)
    return numerator / denominator
null
2,626
def get_nonspade_norm_layer(norm_type='instance'):
    """Return a function that wraps a conv layer with a normalization layer.

    ``norm_type`` may be prefixed with 'spectral' (e.g. 'spectralinstance') to
    additionally apply spectral normalization to the conv layer. The remaining
    suffix selects the normalization: 'batch', 'sync_batch', 'instance' or
    'none'.
    """

    # helper function to get # output channels of the previous layer
    def get_out_channel(layer):
        if hasattr(layer, 'out_channels'):
            return getattr(layer, 'out_channels')
        return layer.weight.size(0)

    # this function will be returned
    def add_norm_layer(layer):
        nonlocal norm_type
        # Bug fix: subnorm_type was previously only assigned inside the
        # 'spectral' branch, raising NameError for plain norm types such as
        # the default 'instance' (upstream nvlabs/SPADE initializes it here).
        subnorm_type = norm_type
        if norm_type.startswith('spectral'):
            layer = spectral_norm(layer)
            subnorm_type = norm_type[len('spectral'):]

        if subnorm_type == 'none' or len(subnorm_type) == 0:
            return layer

        # remove bias in the previous layer, which is meaningless
        # since it has no effect after normalization
        if getattr(layer, 'bias', None) is not None:
            delattr(layer, 'bias')
            layer.register_parameter('bias', None)

        if subnorm_type == 'batch':
            norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'sync_batch':
            print('SyncBatchNorm is currently not supported under single-GPU mode, switch to "instance" instead')
            # norm_layer = SynchronizedBatchNorm2d(
            #    get_out_channel(layer), affine=True)
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
        elif subnorm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
        else:
            raise ValueError(f'normalization layer {subnorm_type} is not recognized')

        return nn.Sequential(layer, norm_layer)

    print('This is a legacy from nvlabs/SPADE, and will be removed in future versions.')
    return add_norm_layer
null
2,627
def calc_mean_std(feat, eps=1e-5):
    """Calculate mean and std for adaptive_instance_normalization.

    Args:
        feat (Tensor): 4D tensor.
        eps (float): A small value added to the variance to avoid
            divide-by-zero. Default: 1e-5.
    """
    size = feat.size()
    assert len(size) == 4, 'The input feature should be 4D tensor.'
    n, c = size[:2]
    flat = feat.view(n, c, -1)
    # Per-channel statistics over the spatial dimensions; eps guards sqrt(0).
    std = (flat.var(dim=2) + eps).sqrt().view(n, c, 1, 1)
    mean = flat.mean(dim=2).view(n, c, 1, 1)
    return mean, std


def adaptive_instance_normalization(content_feat, style_feat):
    """Adaptive instance normalization.

    Adjust the reference features to have the similar color and illuminations
    as those in the degradate features.

    Args:
        content_feat (Tensor): The reference feature.
        style_feat (Tensor): The degradate features.
    """
    shape = content_feat.size()
    style_mean, style_std = calc_mean_std(style_feat)
    content_mean, content_std = calc_mean_std(content_feat)
    # Whiten the content per channel, then re-color with the style statistics.
    whitened = (content_feat - content_mean.expand(shape)) / content_std.expand(shape)
    return whitened * style_std.expand(shape) + style_mean.expand(shape)
Adaptive instance normalization. Adjust the reference features to have the similar color and illuminations as those in the degradate features. Args: content_feat (Tensor): The reference feature. style_feat (Tensor): The degradate features.
2,628
def AttentionBlock(in_channel):
    """Two spectrally-normalized 3x3 convs with a LeakyReLU in between.

    Channel count and spatial size are preserved (stride 1, padding 1).
    """
    first_conv = spectral_norm(nn.Conv2d(in_channel, in_channel, 3, 1, 1))
    second_conv = spectral_norm(nn.Conv2d(in_channel, in_channel, 3, 1, 1))
    return nn.Sequential(first_conv, nn.LeakyReLU(0.2, True), second_conv)
null
2,629
def conv_block(in_channels, out_channels, kernel_size=3, stride=1, dilation=1, bias=True):
    """Conv block used in MSDilationBlock.

    Two spectrally-normalized dilated convolutions with a LeakyReLU(0.2)
    in between; padding keeps the spatial size for stride 1.
    """
    pad = ((kernel_size - 1) // 2) * dilation

    def _sn_conv(cin, cout):
        # Both convs share every hyper-parameter except the channel counts.
        return spectral_norm(
            nn.Conv2d(cin, cout, kernel_size=kernel_size, stride=stride, dilation=dilation, padding=pad, bias=bias))

    return nn.Sequential(
        _sn_conv(in_channels, out_channels),
        nn.LeakyReLU(0.2),
        _sn_conv(out_channels, out_channels),
    )
Conv block used in MSDilationBlock.
2,630
def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
    """Initialize network weights.

    Args:
        module_list (list[nn.Module] | nn.Module): Modules to be initialized.
        scale (float): Scale initialized weights, especially for residual
            blocks. Default: 1.
        bias_fill (float): The value to fill bias. Default: 0
        kwargs (dict): Other arguments for initialization function.
    """
    if not isinstance(module_list, list):
        module_list = [module_list]
    for module in module_list:
        for m in module.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                # Conv2d and Linear share identical Kaiming init + scaling;
                # previously duplicated in two byte-identical branches.
                init.kaiming_normal_(m.weight, **kwargs)
                m.weight.data *= scale
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
            elif isinstance(m, _BatchNorm):
                init.constant_(m.weight, 1)
                if m.bias is not None:
                    m.bias.data.fill_(bias_fill)
Initialize network weights. Args: module_list (list[nn.Module] | nn.Module): Modules to be initialized. scale (float): Scale initialized weights, especially for residual blocks. Default: 1. bias_fill (float): The value to fill bias. Default: 0 kwargs (dict): Other arguments for initialization function.
2,631
def make_layer(basic_block, num_basic_block, **kwarg):
    """Make layers by stacking the same blocks.

    Args:
        basic_block (nn.module): nn.module class for basic block.
        num_basic_block (int): number of blocks.

    Returns:
        nn.Sequential: Stacked blocks in nn.Sequential.
    """
    # Instantiate the block class once per repetition with the same kwargs.
    return nn.Sequential(*(basic_block(**kwarg) for _ in range(num_basic_block)))
Make layers by stacking the same blocks. Args: basic_block (nn.module): nn.module class for basic block. num_basic_block (int): number of blocks. Returns: nn.Sequential: Stacked blocks in nn.Sequential.
2,632
def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros', align_corners=True):
    """Warp an image or feature map with optical flow.

    Args:
        x (Tensor): Tensor with size (n, c, h, w).
        flow (Tensor): Tensor with size (n, h, w, 2), normal value.
        interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'.
        padding_mode (str): 'zeros' or 'border' or 'reflection'.
            Default: 'zeros'.
        align_corners (bool): Before pytorch 1.3, the default value is
            align_corners=True. After pytorch 1.3, the default value is
            align_corners=False. Here, we use the True as default.

    Returns:
        Tensor: Warped image or feature map.
    """
    # Flow must match x spatially; flow[..., 0] offsets the x (width)
    # coordinate and flow[..., 1] the y (height) coordinate, in pixels.
    assert x.size()[-2:] == flow.size()[1:3]
    _, _, h, w = x.size()
    # create mesh grid
    grid_y, grid_x = torch.meshgrid(torch.arange(0, h).type_as(x), torch.arange(0, w).type_as(x))
    grid = torch.stack((grid_x, grid_y), 2).float()  # W(x), H(y), 2
    grid.requires_grad = False

    vgrid = grid + flow
    # scale grid to [-1,1]
    # NOTE(review): dividing by (w - 1) / (h - 1) matches grid_sample's
    # align_corners=True convention; with align_corners=False the result is
    # slightly shifted — see the existing TODO below.
    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(w - 1, 1) - 1.0
    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(h - 1, 1) - 1.0
    vgrid_scaled = torch.stack((vgrid_x, vgrid_y), dim=3)
    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode, align_corners=align_corners)

    # TODO, what if align_corners=False
    return output
Warp an image or feature map with optical flow. Args: x (Tensor): Tensor with size (n, c, h, w). flow (Tensor): Tensor with size (n, h, w, 2), normal value. interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'. padding_mode (str): 'zeros' or 'border' or 'reflection'. Default: 'zeros'. align_corners (bool): Before pytorch 1.3, the default value is align_corners=True. After pytorch 1.3, the default value is align_corners=False. Here, we use the True as default. Returns: Tensor: Warped image or feature map.
2,633
def resize_flow(flow, size_type, sizes, interp_mode='bilinear', align_corners=False):
    """Resize a flow according to ratio or shape.

    Args:
        flow (Tensor): Precomputed flow. shape [N, 2, H, W].
        size_type (str): 'ratio' or 'shape'.
        sizes (list[int | float]): the ratio for resizing or the final output
            shape.
            1) The order of ratio should be [ratio_h, ratio_w]. For
            downsampling, the ratio should be smaller than 1.0 (i.e., ratio
            < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e.,
            ratio > 1.0).
            2) The order of output_size should be [out_h, out_w].
        interp_mode (str): The mode of interpolation for resizing.
            Default: 'bilinear'.
        align_corners (bool): Whether align corners. Default: False.

    Returns:
        Tensor: Resized flow.
    """
    _, _, flow_h, flow_w = flow.size()
    if size_type == 'ratio':
        output_h, output_w = int(flow_h * sizes[0]), int(flow_w * sizes[1])
    elif size_type == 'shape':
        output_h, output_w = sizes[0], sizes[1]
    else:
        raise ValueError(f'Size type should be ratio or shape, but got type {size_type}.')

    # Rescale the flow vectors themselves, then resample onto the new grid;
    # clone first so the caller's tensor is untouched.
    scaled_flow = flow.clone()
    scaled_flow[:, 0, :, :] *= output_w / flow_w
    scaled_flow[:, 1, :, :] *= output_h / flow_h
    return F.interpolate(
        input=scaled_flow, size=(output_h, output_w), mode=interp_mode, align_corners=align_corners)
Resize a flow according to ratio or shape. Args: flow (Tensor): Precomputed flow. shape [N, 2, H, W]. size_type (str): 'ratio' or 'shape'. sizes (list[int | float]): the ratio for resizing or the final output shape. 1) The order of ratio should be [ratio_h, ratio_w]. For downsampling, the ratio should be smaller than 1.0 (i.e., ratio < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e., ratio > 1.0). 2) The order of output_size should be [out_h, out_w]. interp_mode (str): The mode of interpolation for resizing. Default: 'bilinear'. align_corners (bool): Whether align corners. Default: False. Returns: Tensor: Resized flow.
2,634
def pixel_unshuffle(x, scale):
    """ Pixel unshuffle.

    Args:
        x (Tensor): Input feature with shape (b, c, hh, hw).
        scale (int): Downsample ratio.

    Returns:
        Tensor: the pixel unshuffled feature.
    """
    b, c, hh, hw = x.size()
    assert hh % scale == 0 and hw % scale == 0
    h, w = hh // scale, hw // scale
    out_channel = c * (scale**2)
    # Split each spatial dim into (size, scale) blocks, then move the two
    # scale axes into the channel dimension.
    blocks = x.view(b, c, h, scale, w, scale)
    return blocks.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
Pixel unshuffle. Args: x (Tensor): Input feature with shape (b, c, hh, hw). scale (int): Downsample ratio. Returns: Tensor: the pixel unshuffled feature.
2,635
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    # From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn(
            'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
            'The distribution of values may be incorrect.',
            stacklevel=2)

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        low = norm_cdf((a - mean) / std)
        up = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [low, up], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * low - 1, 2 * up - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        # Modified in place; also returned for call-chaining convenience.
        return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution.

    From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py

    The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
r"""Fills the input Tensor with values drawn from a truncated normal distribution. From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w)
2,636
import collections.abc import math import torch import torchvision import warnings from distutils.version import LooseVersion from itertools import repeat from torch import nn as nn from torch.nn import functional as F from torch.nn import init as init from torch.nn.modules.batchnorm import _BatchNorm from basicsr.ops.dcn import ModulatedDeformConvPack, modulated_deform_conv from basicsr.utils import get_root_logger def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable): return x return tuple(repeat(x, n)) return parse
null
2,637
import math import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from basicsr.utils.registry import ARCH_REGISTRY from .arch_util import to_2tuple, trunc_normal_ The provided code snippet includes necessary dependencies for implementing the `drop_path` function. Write a Python function `def drop_path(x, drop_prob: float = 0., training: bool = False)` to solve the following problem: Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py Here is the function: def drop_path(x, drop_prob: float = 0., training: bool = False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py """ if drop_prob == 0. or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) random_tensor.floor_() # binarize output = x.div(keep_prob) * random_tensor return output
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
2,638
import math import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from basicsr.utils.registry import ARCH_REGISTRY from .arch_util import to_2tuple, trunc_normal_ The provided code snippet includes necessary dependencies for implementing the `window_partition` function. Write a Python function `def window_partition(x, window_size)` to solve the following problem: Args: x: (b, h, w, c) window_size (int): window size Returns: windows: (num_windows*b, window_size, window_size, c) Here is the function: def window_partition(x, window_size): """ Args: x: (b, h, w, c) window_size (int): window size Returns: windows: (num_windows*b, window_size, window_size, c) """ b, h, w, c = x.shape x = x.view(b, h // window_size, window_size, w // window_size, window_size, c) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c) return windows
Args: x: (b, h, w, c) window_size (int): window size Returns: windows: (num_windows*b, window_size, window_size, c)
2,639
import math import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from basicsr.utils.registry import ARCH_REGISTRY from .arch_util import to_2tuple, trunc_normal_ The provided code snippet includes necessary dependencies for implementing the `window_reverse` function. Write a Python function `def window_reverse(windows, window_size, h, w)` to solve the following problem: Args: windows: (num_windows*b, window_size, window_size, c) window_size (int): Window size h (int): Height of image w (int): Width of image Returns: x: (b, h, w, c) Here is the function: def window_reverse(windows, window_size, h, w): """ Args: windows: (num_windows*b, window_size, window_size, c) window_size (int): Window size h (int): Height of image w (int): Width of image Returns: x: (b, h, w, c) """ b = int(windows.shape[0] / (h * w / window_size / window_size)) x = windows.view(b, h // window_size, w // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1) return x
Args: windows: (num_windows*b, window_size, window_size, c) window_size (int): Window size h (int): Height of image w (int): Width of image Returns: x: (b, h, w, c)
2,640
import os import torch from torch.autograd import Function from torch.nn import functional as F class UpFirDn2d(Function): def forward(ctx, input, kernel, up, down, pad): up_x, up_y = up down_x, down_y = down pad_x0, pad_x1, pad_y0, pad_y1 = pad kernel_h, kernel_w = kernel.shape _, channel, in_h, in_w = input.shape ctx.in_size = input.shape input = input.reshape(-1, in_h, in_w, 1) ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1])) out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 ctx.out_size = (out_h, out_w) ctx.up = (up_x, up_y) ctx.down = (down_x, down_y) ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1) g_pad_x0 = kernel_w - pad_x0 - 1 g_pad_y0 = kernel_h - pad_y0 - 1 g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1 g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1 ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1) out = upfirdn2d_ext.upfirdn2d(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1) # out = out.view(major, out_h, out_w, minor) out = out.view(-1, channel, out_h, out_w) return out def backward(ctx, grad_output): kernel, grad_kernel = ctx.saved_tensors grad_input = UpFirDn2dBackward.apply( grad_output, kernel, grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size, ctx.out_size, ) return grad_input, None, None, None, None def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1): _, channel, in_h, in_w = input.shape input = input.reshape(-1, in_h, in_w, 1) _, in_h, in_w, minor = input.shape kernel_h, kernel_w = kernel.shape out = input.view(-1, in_h, 1, in_w, 1, minor) out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1]) out = out.view(-1, in_h * up_y, in_w * up_x, minor) out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]) out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0), max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :, ] out = 
out.permute(0, 3, 1, 2) out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]) w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w) out = F.conv2d(out, w) out = out.reshape( -1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1, ) out = out.permute(0, 2, 3, 1) out = out[:, ::down_y, ::down_x, :] out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1 out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1 return out.view(-1, channel, out_h, out_w) def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)): if input.device.type == 'cpu': out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]) else: out = UpFirDn2d.apply(input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])) return out
null
2,641
import os import torch from torch import nn from torch.autograd import Function class FusedLeakyReLUFunction(Function): def forward(ctx, input, bias, negative_slope, scale): empty = input.new_empty(0) out = fused_act_ext.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) ctx.save_for_backward(out) ctx.negative_slope = negative_slope ctx.scale = scale return out def backward(ctx, grad_output): out, = ctx.saved_tensors grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(grad_output, out, ctx.negative_slope, ctx.scale) return grad_input, grad_bias, None, None def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2**0.5): return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
null
2,642
import math from collections import Counter from torch.optim.lr_scheduler import _LRScheduler The provided code snippet includes necessary dependencies for implementing the `get_position_from_periods` function. Write a Python function `def get_position_from_periods(iteration, cumulative_period)` to solve the following problem: Get the position from a period list. It will return the index of the right-closest number in the period list. For example, the cumulative_period = [100, 200, 300, 400], if iteration == 50, return 0; if iteration == 210, return 2; if iteration == 300, return 2. Args: iteration (int): Current iteration. cumulative_period (list[int]): Cumulative period list. Returns: int: The position of the right-closest number in the period list. Here is the function: def get_position_from_periods(iteration, cumulative_period): """Get the position from a period list. It will return the index of the right-closest number in the period list. For example, the cumulative_period = [100, 200, 300, 400], if iteration == 50, return 0; if iteration == 210, return 2; if iteration == 300, return 2. Args: iteration (int): Current iteration. cumulative_period (list[int]): Cumulative period list. Returns: int: The position of the right-closest number in the period list. """ for i, period in enumerate(cumulative_period): if iteration <= period: return i
Get the position from a period list. It will return the index of the right-closest number in the period list. For example, the cumulative_period = [100, 200, 300, 400], if iteration == 50, return 0; if iteration == 210, return 2; if iteration == 300, return 2. Args: iteration (int): Current iteration. cumulative_period (list[int]): Cumulative period list. Returns: int: The position of the right-closest number in the period list.
2,643
import functools import os import subprocess import torch import torch.distributed as dist import torch.multiprocessing as mp def get_dist_info(): if dist.is_available(): initialized = dist.is_initialized() else: initialized = False if initialized: rank = dist.get_rank() world_size = dist.get_world_size() else: rank = 0 world_size = 1 return rank, world_size def master_only(func): @functools.wraps(func) def wrapper(*args, **kwargs): rank, _ = get_dist_info() if rank == 0: return func(*args, **kwargs) return wrapper
null
2,644
import cv2 import lmdb import sys from multiprocessing import Pool from os import path as osp from tqdm import tqdm def read_img_worker(path, key, compress_level): """Read image worker. Args: path (str): Image path. key (str): Image key. compress_level (int): Compress level when encoding images. Returns: str: Image key. byte: Image byte. tuple[int]: Image shape. """ img = cv2.imread(path, cv2.IMREAD_UNCHANGED) if img.ndim == 2: h, w = img.shape c = 1 else: h, w, c = img.shape _, img_byte = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level]) return (key, img_byte, (h, w, c)) The provided code snippet includes necessary dependencies for implementing the `make_lmdb_from_imgs` function. Write a Python function `def make_lmdb_from_imgs(data_path, lmdb_path, img_path_list, keys, batch=5000, compress_level=1, multiprocessing_read=False, n_thread=40, map_size=None)` to solve the following problem: Make lmdb from images. Contents of lmdb. The file structure is: :: example.lmdb ├── data.mdb ├── lock.mdb ├── meta_info.txt The data.mdb and lock.mdb are standard lmdb files and you can refer to https://lmdb.readthedocs.io/en/release/ for more details. The meta_info.txt is a specified txt file to record the meta information of our datasets. It will be automatically created when preparing datasets by our provided dataset tools. Each line in the txt file records 1)image name (with extension), 2)image shape, and 3)compression level, separated by a white space. For example, the meta information could be: `000_00000000.png (720,1280,3) 1`, which means: 1) image name (with extension): 000_00000000.png; 2) image shape: (720,1280,3); 3) compression level: 1 We use the image name without extension as the lmdb key. If `multiprocessing_read` is True, it will read all the images to memory using multiprocessing. Thus, your server needs to have enough memory. Args: data_path (str): Data path for reading images. lmdb_path (str): Lmdb save path. 
img_path_list (str): Image path list. keys (str): Used for lmdb keys. batch (int): After processing batch images, lmdb commits. Default: 5000. compress_level (int): Compress level when encoding images. Default: 1. multiprocessing_read (bool): Whether use multiprocessing to read all the images to memory. Default: False. n_thread (int): For multiprocessing. map_size (int | None): Map size for lmdb env. If None, use the estimated size from images. Default: None Here is the function: def make_lmdb_from_imgs(data_path, lmdb_path, img_path_list, keys, batch=5000, compress_level=1, multiprocessing_read=False, n_thread=40, map_size=None): """Make lmdb from images. Contents of lmdb. The file structure is: :: example.lmdb ├── data.mdb ├── lock.mdb ├── meta_info.txt The data.mdb and lock.mdb are standard lmdb files and you can refer to https://lmdb.readthedocs.io/en/release/ for more details. The meta_info.txt is a specified txt file to record the meta information of our datasets. It will be automatically created when preparing datasets by our provided dataset tools. Each line in the txt file records 1)image name (with extension), 2)image shape, and 3)compression level, separated by a white space. For example, the meta information could be: `000_00000000.png (720,1280,3) 1`, which means: 1) image name (with extension): 000_00000000.png; 2) image shape: (720,1280,3); 3) compression level: 1 We use the image name without extension as the lmdb key. If `multiprocessing_read` is True, it will read all the images to memory using multiprocessing. Thus, your server needs to have enough memory. Args: data_path (str): Data path for reading images. lmdb_path (str): Lmdb save path. img_path_list (str): Image path list. keys (str): Used for lmdb keys. batch (int): After processing batch images, lmdb commits. Default: 5000. compress_level (int): Compress level when encoding images. Default: 1. multiprocessing_read (bool): Whether use multiprocessing to read all the images to memory. 
Default: False. n_thread (int): For multiprocessing. map_size (int | None): Map size for lmdb env. If None, use the estimated size from images. Default: None """ assert len(img_path_list) == len(keys), ('img_path_list and keys should have the same length, ' f'but got {len(img_path_list)} and {len(keys)}') print(f'Create lmdb for {data_path}, save to {lmdb_path}...') print(f'Totoal images: {len(img_path_list)}') if not lmdb_path.endswith('.lmdb'): raise ValueError("lmdb_path must end with '.lmdb'.") if osp.exists(lmdb_path): print(f'Folder {lmdb_path} already exists. Exit.') sys.exit(1) if multiprocessing_read: # read all the images to memory (multiprocessing) dataset = {} # use dict to keep the order for multiprocessing shapes = {} print(f'Read images with multiprocessing, #thread: {n_thread} ...') pbar = tqdm(total=len(img_path_list), unit='image') def callback(arg): """get the image data and update pbar.""" key, dataset[key], shapes[key] = arg pbar.update(1) pbar.set_description(f'Read {key}') pool = Pool(n_thread) for path, key in zip(img_path_list, keys): pool.apply_async(read_img_worker, args=(osp.join(data_path, path), key, compress_level), callback=callback) pool.close() pool.join() pbar.close() print(f'Finish reading {len(img_path_list)} images.') # create lmdb environment if map_size is None: # obtain data size for one image img = cv2.imread(osp.join(data_path, img_path_list[0]), cv2.IMREAD_UNCHANGED) _, img_byte = cv2.imencode('.png', img, [cv2.IMWRITE_PNG_COMPRESSION, compress_level]) data_size_per_img = img_byte.nbytes print('Data size per image is: ', data_size_per_img) data_size = data_size_per_img * len(img_path_list) map_size = data_size * 10 env = lmdb.open(lmdb_path, map_size=map_size) # write data to lmdb pbar = tqdm(total=len(img_path_list), unit='chunk') txn = env.begin(write=True) txt_file = open(osp.join(lmdb_path, 'meta_info.txt'), 'w') for idx, (path, key) in enumerate(zip(img_path_list, keys)): pbar.update(1) pbar.set_description(f'Write 
{key}') key_byte = key.encode('ascii') if multiprocessing_read: img_byte = dataset[key] h, w, c = shapes[key] else: _, img_byte, img_shape = read_img_worker(osp.join(data_path, path), key, compress_level) h, w, c = img_shape txn.put(key_byte, img_byte) # write meta information txt_file.write(f'{key}.png ({h},{w},{c}) {compress_level}\n') if idx % batch == 0: txn.commit() txn = env.begin(write=True) pbar.close() txn.commit() env.close() txt_file.close() print('\nFinish writing lmdb.')
Make lmdb from images. Contents of lmdb. The file structure is: :: example.lmdb ├── data.mdb ├── lock.mdb ├── meta_info.txt The data.mdb and lock.mdb are standard lmdb files and you can refer to https://lmdb.readthedocs.io/en/release/ for more details. The meta_info.txt is a specified txt file to record the meta information of our datasets. It will be automatically created when preparing datasets by our provided dataset tools. Each line in the txt file records 1)image name (with extension), 2)image shape, and 3)compression level, separated by a white space. For example, the meta information could be: `000_00000000.png (720,1280,3) 1`, which means: 1) image name (with extension): 000_00000000.png; 2) image shape: (720,1280,3); 3) compression level: 1 We use the image name without extension as the lmdb key. If `multiprocessing_read` is True, it will read all the images to memory using multiprocessing. Thus, your server needs to have enough memory. Args: data_path (str): Data path for reading images. lmdb_path (str): Lmdb save path. img_path_list (str): Image path list. keys (str): Used for lmdb keys. batch (int): After processing batch images, lmdb commits. Default: 5000. compress_level (int): Compress level when encoding images. Default: 1. multiprocessing_read (bool): Whether use multiprocessing to read all the images to memory. Default: False. n_thread (int): For multiprocessing. map_size (int | None): Map size for lmdb env. If None, use the estimated size from images. Default: None
2,645
import math import os import requests from torch.hub import download_url_to_file, get_dir from tqdm import tqdm from urllib.parse import urlparse from .misc import sizeof_fmt def get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None def save_response_content(response, destination, file_size=None, chunk_size=32768): if file_size is not None: pbar = tqdm(total=math.ceil(file_size / chunk_size), unit='chunk') readable_file_size = sizeof_fmt(file_size) else: pbar = None with open(destination, 'wb') as f: downloaded_size = 0 for chunk in response.iter_content(chunk_size): downloaded_size += chunk_size if pbar is not None: pbar.update(1) pbar.set_description(f'Download {sizeof_fmt(downloaded_size)} / {readable_file_size}') if chunk: # filter out keep-alive new chunks f.write(chunk) if pbar is not None: pbar.close() The provided code snippet includes necessary dependencies for implementing the `download_file_from_google_drive` function. Write a Python function `def download_file_from_google_drive(file_id, save_path)` to solve the following problem: Download files from google drive. Reference: https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive Args: file_id (str): File id. save_path (str): Save path. Here is the function: def download_file_from_google_drive(file_id, save_path): """Download files from google drive. Reference: https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive Args: file_id (str): File id. save_path (str): Save path. 
""" session = requests.Session() URL = 'https://docs.google.com/uc?export=download' params = {'id': file_id} response = session.get(URL, params=params, stream=True) token = get_confirm_token(response) if token: params['confirm'] = token response = session.get(URL, params=params, stream=True) # get file size response_file_size = session.get(URL, params=params, stream=True, headers={'Range': 'bytes=0-2'}) if 'Content-Range' in response_file_size.headers: file_size = int(response_file_size.headers['Content-Range'].split('/')[1]) else: file_size = None save_response_content(response, save_path, file_size)
Download files from google drive. Reference: https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive Args: file_id (str): File id. save_path (str): Save path.
2,646
import math import os import requests from torch.hub import download_url_to_file, get_dir from tqdm import tqdm from urllib.parse import urlparse from .misc import sizeof_fmt The provided code snippet includes necessary dependencies for implementing the `load_file_from_url` function. Write a Python function `def load_file_from_url(url, model_dir=None, progress=True, file_name=None)` to solve the following problem: Load file form http url, will download models if necessary. Reference: https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py Args: url (str): URL to be downloaded. model_dir (str): The path to save the downloaded model. Should be a full path. If None, use pytorch hub_dir. Default: None. progress (bool): Whether to show the download progress. Default: True. file_name (str): The downloaded file name. If None, use the file name in the url. Default: None. Returns: str: The path to the downloaded file. Here is the function: def load_file_from_url(url, model_dir=None, progress=True, file_name=None): """Load file form http url, will download models if necessary. Reference: https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py Args: url (str): URL to be downloaded. model_dir (str): The path to save the downloaded model. Should be a full path. If None, use pytorch hub_dir. Default: None. progress (bool): Whether to show the download progress. Default: True. file_name (str): The downloaded file name. If None, use the file name in the url. Default: None. Returns: str: The path to the downloaded file. 
""" if model_dir is None: # use the pytorch hub_dir hub_dir = get_dir() model_dir = os.path.join(hub_dir, 'checkpoints') os.makedirs(model_dir, exist_ok=True) parts = urlparse(url) filename = os.path.basename(parts.path) if file_name is not None: filename = file_name cached_file = os.path.abspath(os.path.join(model_dir, filename)) if not os.path.exists(cached_file): print(f'Downloading: "{url}" to {cached_file}\n') download_url_to_file(url, cached_file, hash_prefix=None, progress=progress) return cached_file
Load file form http url, will download models if necessary. Reference: https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py Args: url (str): URL to be downloaded. model_dir (str): The path to save the downloaded model. Should be a full path. If None, use pytorch hub_dir. Default: None. progress (bool): Whether to show the download progress. Default: True. file_name (str): The downloaded file name. If None, use the file name in the url. Default: None. Returns: str: The path to the downloaded file.
2,647
import cv2 import math import numpy as np import os import torch from torchvision.utils import make_grid The provided code snippet includes necessary dependencies for implementing the `img2tensor` function. Write a Python function `def img2tensor(imgs, bgr2rgb=True, float32=True)` to solve the following problem: Numpy array to tensor. Args: imgs (list[ndarray] | ndarray): Input images. bgr2rgb (bool): Whether to change bgr to rgb. float32 (bool): Whether to change to float32. Returns: list[tensor] | tensor: Tensor images. If returned results only have one element, just return tensor. Here is the function: def img2tensor(imgs, bgr2rgb=True, float32=True): """Numpy array to tensor. Args: imgs (list[ndarray] | ndarray): Input images. bgr2rgb (bool): Whether to change bgr to rgb. float32 (bool): Whether to change to float32. Returns: list[tensor] | tensor: Tensor images. If returned results only have one element, just return tensor. """ def _totensor(img, bgr2rgb, float32): if img.shape[2] == 3 and bgr2rgb: if img.dtype == 'float64': img = img.astype('float32') img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = torch.from_numpy(img.transpose(2, 0, 1)) if float32: img = img.float() return img if isinstance(imgs, list): return [_totensor(img, bgr2rgb, float32) for img in imgs] else: return _totensor(imgs, bgr2rgb, float32)
Numpy array to tensor. Args: imgs (list[ndarray] | ndarray): Input images. bgr2rgb (bool): Whether to change bgr to rgb. float32 (bool): Whether to change to float32. Returns: list[tensor] | tensor: Tensor images. If returned results only have one element, just return tensor.
2,648
import cv2 import math import numpy as np import os import torch from torchvision.utils import make_grid The provided code snippet includes necessary dependencies for implementing the `tensor2img` function. Write a Python function `def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1))` to solve the following problem: Convert torch Tensors into image numpy arrays. After clamping to [min, max], values will be normalized to [0, 1]. Args: tensor (Tensor or list[Tensor]): Accept shapes: 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W); 2) 3D Tensor of shape (3/1 x H x W); 3) 2D Tensor of shape (H x W). Tensor channel should be in RGB order. rgb2bgr (bool): Whether to change rgb to bgr. out_type (numpy type): output types. If ``np.uint8``, transform outputs to uint8 type with range [0, 255]; otherwise, float type with range [0, 1]. Default: ``np.uint8``. min_max (tuple[int]): min and max values for clamp. Returns: (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of shape (H x W). The channel order is BGR. Here is the function: def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)): """Convert torch Tensors into image numpy arrays. After clamping to [min, max], values will be normalized to [0, 1]. Args: tensor (Tensor or list[Tensor]): Accept shapes: 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W); 2) 3D Tensor of shape (3/1 x H x W); 3) 2D Tensor of shape (H x W). Tensor channel should be in RGB order. rgb2bgr (bool): Whether to change rgb to bgr. out_type (numpy type): output types. If ``np.uint8``, transform outputs to uint8 type with range [0, 255]; otherwise, float type with range [0, 1]. Default: ``np.uint8``. min_max (tuple[int]): min and max values for clamp. Returns: (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of shape (H x W). The channel order is BGR. 
""" if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}') if torch.is_tensor(tensor): tensor = [tensor] result = [] for _tensor in tensor: _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max) _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0]) n_dim = _tensor.dim() if n_dim == 4: img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy() img_np = img_np.transpose(1, 2, 0) if rgb2bgr: img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) elif n_dim == 3: img_np = _tensor.numpy() img_np = img_np.transpose(1, 2, 0) if img_np.shape[2] == 1: # gray image img_np = np.squeeze(img_np, axis=2) else: if rgb2bgr: img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) elif n_dim == 2: img_np = _tensor.numpy() else: raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}') if out_type == np.uint8: # Unlike MATLAB, numpy.unit8() WILL NOT round by default. img_np = (img_np * 255.0).round() img_np = img_np.astype(out_type) result.append(img_np) if len(result) == 1 and torch.is_tensor(tensor): result = result[0] return result
Convert torch Tensors into image numpy arrays. After clamping to [min, max], values will be normalized to [0, 1]. Args: tensor (Tensor or list[Tensor]): Accept shapes: 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W); 2) 3D Tensor of shape (3/1 x H x W); 3) 2D Tensor of shape (H x W). Tensor channel should be in RGB order. rgb2bgr (bool): Whether to change rgb to bgr. out_type (numpy type): output types. If ``np.uint8``, transform outputs to uint8 type with range [0, 255]; otherwise, float type with range [0, 1]. Default: ``np.uint8``. min_max (tuple[int]): min and max values for clamp. Returns: (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of shape (H x W). The channel order is BGR.
2,649
import cv2 import math import numpy as np import os import torch from torchvision.utils import make_grid The provided code snippet includes necessary dependencies for implementing the `tensor2img_fast` function. Write a Python function `def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1))` to solve the following problem: This implementation is slightly faster than tensor2img. It now only supports torch tensor with shape (1, c, h, w). Args: tensor (Tensor): Now only support torch tensor with (1, c, h, w). rgb2bgr (bool): Whether to change rgb to bgr. Default: True. min_max (tuple[int]): min and max values for clamp. Here is the function: def tensor2img_fast(tensor, rgb2bgr=True, min_max=(0, 1)): """This implementation is slightly faster than tensor2img. It now only supports torch tensor with shape (1, c, h, w). Args: tensor (Tensor): Now only support torch tensor with (1, c, h, w). rgb2bgr (bool): Whether to change rgb to bgr. Default: True. min_max (tuple[int]): min and max values for clamp. """ output = tensor.squeeze(0).detach().clamp_(*min_max).permute(1, 2, 0) output = (output - min_max[0]) / (min_max[1] - min_max[0]) * 255 output = output.type(torch.uint8).cpu().numpy() if rgb2bgr: output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR) return output
This implementation is slightly faster than tensor2img. It now only supports torch tensor with shape (1, c, h, w). Args: tensor (Tensor): Now only support torch tensor with (1, c, h, w). rgb2bgr (bool): Whether to change rgb to bgr. Default: True. min_max (tuple[int]): min and max values for clamp.
2,650
import cv2 import math import numpy as np import os import torch from torchvision.utils import make_grid The provided code snippet includes necessary dependencies for implementing the `imfrombytes` function. Write a Python function `def imfrombytes(content, flag='color', float32=False)` to solve the following problem: Read an image from bytes. Args: content (bytes): Image bytes got from files or other streams. flag (str): Flags specifying the color type of a loaded image, candidates are `color`, `grayscale` and `unchanged`. float32 (bool): Whether to change to float32., If True, will also norm to [0, 1]. Default: False. Returns: ndarray: Loaded image array. Here is the function: def imfrombytes(content, flag='color', float32=False): """Read an image from bytes. Args: content (bytes): Image bytes got from files or other streams. flag (str): Flags specifying the color type of a loaded image, candidates are `color`, `grayscale` and `unchanged`. float32 (bool): Whether to change to float32., If True, will also norm to [0, 1]. Default: False. Returns: ndarray: Loaded image array. """ img_np = np.frombuffer(content, np.uint8) imread_flags = {'color': cv2.IMREAD_COLOR, 'grayscale': cv2.IMREAD_GRAYSCALE, 'unchanged': cv2.IMREAD_UNCHANGED} img = cv2.imdecode(img_np, imread_flags[flag]) if float32: img = img.astype(np.float32) / 255. return img
Read an image from bytes. Args: content (bytes): Image bytes got from files or other streams. flag (str): Flags specifying the color type of a loaded image, candidates are `color`, `grayscale` and `unchanged`. float32 (bool): Whether to change to float32., If True, will also norm to [0, 1]. Default: False. Returns: ndarray: Loaded image array.
2,651
def imwrite(img, file_path, params=None, auto_mkdir=True):
    """Save an image array to disk.

    Args:
        img (ndarray): Image array to be written.
        file_path (str): Destination file path.
        params (None or list): Extra encoder parameters, forwarded to
            opencv's :func:`imwrite`.
        auto_mkdir (bool): Create the parent directory of `file_path` if it
            does not exist yet. Default: True.

    Raises:
        IOError: If opencv reports a failed write.
    """
    if auto_mkdir:
        parent = os.path.abspath(os.path.dirname(file_path))
        os.makedirs(parent, exist_ok=True)
    if not cv2.imwrite(file_path, img, params):
        raise IOError('Failed in writing images.')
Write image to file. Args: img (ndarray): Image array to be written. file_path (str): Image file path. params (None or list): Same as opencv's :func:`imwrite` interface. auto_mkdir (bool): If the parent folder of `file_path` does not exist, whether to create it automatically. Returns: bool: Successful or not.
2,652
def crop_border(imgs, crop_border):
    """Remove `crop_border` pixels from every edge of the image(s).

    Args:
        imgs (list[ndarray] | ndarray): Images with shape (h, w, c).
        crop_border (int): Crop border for each end of height and width.

    Returns:
        list[ndarray] | ndarray: Cropped image(s).
    """
    if crop_border == 0:
        return imgs
    window = slice(crop_border, -crop_border)
    if isinstance(imgs, list):
        return [img[window, window, ...] for img in imgs]
    return imgs[window, window, ...]
Crop borders of images. Args: imgs (list[ndarray] | ndarray): Images with shape (h, w, c). crop_border (int): Crop border for each end of height and weight. Returns: list[ndarray]: Cropped images.
2,653
def set_random_seed(seed):
    """Seed every random generator the project relies on."""
    # Stdlib and numpy generators.
    random.seed(seed)
    np.random.seed(seed)
    # Torch on CPU and on every visible GPU (no-ops without CUDA).
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
Set random seeds.
2,654
def mkdir_and_rename(path):
    """mkdirs. If path exists, rename it with timestamp and create a new one.

    Args:
        path (str): Folder path.
    """
    if osp.exists(path):
        archived = path + '_archived_' + get_time_str()
        print(f'Path already exists. Rename it to {archived}', flush=True)
        os.rename(path, archived)
    os.makedirs(path, exist_ok=True)


def make_exp_dirs(opt):
    """Create the experiment/result directory tree described in `opt`."""
    paths = opt['path'].copy()
    root_key = 'experiments_root' if opt['is_train'] else 'results_root'
    mkdir_and_rename(paths.pop(root_key))
    # These keys hold options or checkpoint paths, not directories to create.
    skip_markers = ('strict_load', 'pretrain_network', 'resume', 'param_key')
    for key, path in paths.items():
        if any(marker in key for marker in skip_markers):
            continue
        os.makedirs(path, exist_ok=True)
Make dirs for experiments.
2,655
def scandir(dir_path, suffix=None, recursive=False, full_path=False):
    """Scan a directory to find the interested files.

    Args:
        dir_path (str): Path of the directory.
        suffix (str | tuple(str), optional): Only yield files whose path ends
            with this suffix (or one of these suffixes). Default: None.
        recursive (bool, optional): Descend into sub-directories as well.
            Default: False.
        full_path (bool, optional): Yield paths including `dir_path` instead
            of paths relative to it. Default: False.

    Returns:
        A generator for all the interested files with relative paths.
    """
    if suffix is not None and not isinstance(suffix, (str, tuple)):
        raise TypeError('"suffix" must be a string or tuple of strings')

    root = dir_path

    def _walk(current):
        for entry in os.scandir(current):
            if not entry.name.startswith('.') and entry.is_file():
                out_path = entry.path if full_path else osp.relpath(entry.path, root)
                # str.endswith accepts a tuple of suffixes as well.
                if suffix is None or out_path.endswith(suffix):
                    yield out_path
            elif recursive:
                yield from _walk(entry.path)

    return _walk(dir_path)
Scan a directory to find the interested files. Args: dir_path (str): Path of the directory. suffix (str | tuple(str), optional): File suffix that we are interested in. Default: None. recursive (bool, optional): If set to True, recursively scan the directory. Default: False. full_path (bool, optional): If set to True, include the dir_path. Default: False. Returns: A generator for all the interested files with relative paths.
2,656
def check_resume(opt, resume_iter):
    """Check resume states and pretrain_network paths.

    Args:
        opt (dict): Options.
        resume_iter (int): Resume iteration.
    """
    if not opt['path']['resume_state']:
        return
    # Every top-level option named 'network_*' describes a resumable network.
    networks = [key for key in opt.keys() if key.startswith('network_')]
    if any(opt['path'].get(f'pretrain_{net}') is not None for net in networks):
        print('pretrain_network path will be ignored during resuming.')
    # Point each (non-ignored) network at its checkpoint for `resume_iter`.
    ignored = opt['path'].get('ignore_resume_networks')
    for network in networks:
        if ignored is not None and network in ignored:
            continue
        name = f'pretrain_{network}'
        basename = network.replace('network_', '')
        opt['path'][name] = osp.join(opt['path']['models'], f'net_{basename}_{resume_iter}.pth')
        print(f"Set {name} to {opt['path'][name]}")
    # EMA weights are not resumed; fall back to the plain parameters.
    for param_key in [k for k in opt['path'].keys() if k.startswith('param_key')]:
        if opt['path'][param_key] == 'params_ema':
            opt['path'][param_key] = 'params'
            print(f'Set {param_key} to params')
Check resume states and pretrain_network paths. Args: opt (dict): Options. resume_iter (int): Resume iteration.
2,657
def dequantize_flow(dx, dy, max_val=0.02, denorm=True):
    """Recover from quantized flow.

    Args:
        dx (ndarray): Quantized dx.
        dy (ndarray): Quantized dy.
        max_val (float): Maximum value used when quantizing.
        denorm (bool): Whether to multiply flow values with width/height.

    Returns:
        ndarray: Dequantized flow.
    """
    assert dx.shape == dy.shape
    assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1)
    dx, dy = (dequantize(d, -max_val, max_val, 255) for d in (dx, dy))
    if denorm:
        # dx and dy share the same shape (asserted above), so indexing dx
        # for both width and height matches the original behavior.
        dx *= dx.shape[1]
        dy *= dx.shape[0]
    return np.dstack((dx, dy))


def flowread(flow_path, quantize=False, concat_axis=0, *args, **kwargs):
    """Read an optical flow map.

    Args:
        flow_path (ndarray or str): Flow path.
        quantize (bool): whether to read quantized pair; if True, remaining
            args are forwarded to :func:`dequantize_flow`.
        concat_axis (int): The axis that dx and dy are concatenated, can be
            either 0 or 1. Ignored if quantize is False.

    Returns:
        ndarray: Optical flow represented as a (h, w, 2) numpy array
    """
    if quantize:
        assert concat_axis in (0, 1)
        cat_flow = cv2.imread(flow_path, cv2.IMREAD_UNCHANGED)
        if cat_flow.ndim != 2:
            raise IOError(f'{flow_path} is not a valid quantized flow file, its dimension is {cat_flow.ndim}.')
        assert cat_flow.shape[concat_axis] % 2 == 0
        dx, dy = np.split(cat_flow, 2, axis=concat_axis)
        flow = dequantize_flow(dx, dy, *args, **kwargs)
    else:
        with open(flow_path, 'rb') as f:
            try:
                header = f.read(4).decode('utf-8')
            except Exception:
                raise IOError(f'Invalid flow file: {flow_path}')
            if header != 'PIEH':
                raise IOError(f'Invalid flow file: {flow_path}, header does not contain PIEH')
            # Binary .flo layout: 'PIEH', int32 width, int32 height, then
            # h*w*2 float32 flow values.
            w = np.fromfile(f, np.int32, 1).squeeze()
            h = np.fromfile(f, np.int32, 1).squeeze()
            flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2))
    return flow.astype(np.float32)
Read an optical flow map. Args: flow_path (ndarray or str): Flow path. quantize (bool): whether to read quantized pair, if set to True, remaining args will be passed to :func:`dequantize_flow`. concat_axis (int): The axis that dx and dy are concatenated, can be either 0 or 1. Ignored if quantize is False. Returns: ndarray: Optical flow represented as a (h, w, 2) numpy array
2,658
def quantize_flow(flow, max_val=0.02, norm=True):
    """Quantize flow to [0, 255].

    After this step, the size of flow will be much smaller, and can be
    dumped as jpeg images.

    Args:
        flow (ndarray): (h, w, 2) array of optical flow.
        max_val (float): Maximum value of flow, values beyond
            [-max_val, max_val] will be truncated.
        norm (bool): Whether to divide flow values by image width/height.

    Returns:
        tuple[ndarray]: Quantized dx and dy.
    """
    h, w, _ = flow.shape
    dx = flow[..., 0]
    dy = flow[..., 1]
    if norm:
        # Non-inplace division so the caller's array is left untouched.
        dx = dx / w
        dy = dy / h
    # 255 levels (not 256) so that 0 maps back to 0 after dequantization.
    return tuple(quantize(d, -max_val, max_val, 255, np.uint8) for d in (dx, dy))


def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs):
    """Write optical flow to file.

    If the flow is not quantized, it will be saved as a .flo file losslessly,
    otherwise a jpeg image which is lossy but of much smaller size. (dx and
    dy will be concatenated horizontally into a single image if quantize
    is True.)

    Args:
        flow (ndarray): (h, w, 2) array of optical flow.
        filename (str): Output filepath.
        quantize (bool): Whether to quantize the flow and save it to 2 jpeg
            images. If set to True, remaining args will be passed to
            :func:`quantize_flow`.
        concat_axis (int): The axis that dx and dy are concatenated, can be
            either 0 or 1. Ignored if quantize is False.
    """
    if quantize:
        assert concat_axis in (0, 1)
        dx, dy = quantize_flow(flow, *args, **kwargs)
        dxdy = np.concatenate((dx, dy), axis=concat_axis)
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        cv2.imwrite(filename, dxdy)
    else:
        # .flo layout: 'PIEH' magic, int32 width, int32 height, float32 data.
        with open(filename, 'wb') as f:
            f.write('PIEH'.encode('utf-8'))
            np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
            flow.astype(np.float32).tofile(f)
            f.flush()
Write optical flow to file. If the flow is not quantized, it will be saved as a .flo file losslessly, otherwise a jpeg image which is lossy but of much smaller size. (dx and dy will be concatenated horizontally into a single image if quantize is True.) Args: flow (ndarray): (h, w, 2) array of optical flow. filename (str): Output filepath. quantize (bool): Whether to quantize the flow and save it to 2 jpeg images. If set to True, remaining args will be passed to :func:`quantize_flow`. concat_axis (int): The axis that dx and dy are concatenated, can be either 0 or 1. Ignored if quantize is False.
2,659
def filter2D(img, kernel):
    """PyTorch version of cv2.filter2D

    Args:
        img (Tensor): (b, c, h, w)
        kernel (Tensor): (b, k, k)
    """
    k = kernel.size(-1)
    b, c, h, w = img.size()
    if k % 2 != 1:
        # Only odd kernels have a well-defined center for symmetric padding.
        raise ValueError('Wrong kernel size')
    pad = k // 2
    img = F.pad(img, (pad, pad, pad, pad), mode='reflect')
    ph, pw = img.size()[-2:]

    if kernel.size(0) == 1:
        # A single kernel shared by the whole batch: fold batch into channels.
        flat = img.view(b * c, 1, ph, pw)
        shared = kernel.view(1, 1, k, k)
        return F.conv2d(flat, shared, padding=0).view(b, c, h, w)

    # Per-image kernels: grouped convolution, one group per (image, channel).
    flat = img.view(1, b * c, ph, pw)
    per_image = kernel.view(b, 1, k, k).repeat(1, c, 1, 1).view(b * c, 1, k, k)
    return F.conv2d(flat, per_image, groups=b * c).view(b, c, h, w)
PyTorch version of cv2.filter2D Args: img (Tensor): (b, c, h, w) kernel (Tensor): (b, k, k)