id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
2,059
import numpy as np import matplotlib.pyplot as plt from scipy.special import binom from numpy.linalg import norm


def num_bezier(n_ctrl, degree=3):
    """Return the number of Bezier segments spanned by ``n_ctrl`` control points.

    NOTE(review): the body of this function is missing from this extract
    (truncated) — presumably ``int((n_ctrl - 1) / degree)`` for a piecewise
    curve whose segments share endpoints, but confirm against the full file.
    """


def plot_control_polygon(Cp, degree=3, lw=0.5, linecolor=np.ones(3)*0.1):
    """Draw the control polygon of a piecewise Bezier curve on the current
    matplotlib axes: dotted polygon legs plus circular control-point markers,
    one group of (degree + 1) points per segment.

    Cp        : (N, 2) array of control points for all segments.
    degree    : Bezier degree (3 = cubic); controls how points are grouped.
    lw        : line width for the dotted polygon legs.
    linecolor : RGB color of the dotted legs.
    """
    n_bezier = num_bezier(len(Cp), degree)
    for i in range(n_bezier):
        # Segment i uses control points [i*degree, i*degree + degree] —
        # adjacent segments share their boundary point.
        cp = Cp[i*degree:i*degree+degree+1, :]
        if degree==3:
            # Cubic case: draw the two tangent handles (P0-P1 and P2-P3)
            # separately so the chord P1-P2 is not drawn.
            plt.plot(cp[0:2,0], cp[0:2, 1], ':', color=linecolor, linewidth=lw)
            plt.plot(cp[2:,0], cp[2:,1], ':', color=linecolor, linewidth=lw)
            plt.plot(cp[:,0], cp[:,1], 'o', color=[0, 0.5, 1.], markersize=4)
        else:
            # Generic degree: dotted polyline through all control points.
            plt.plot(cp[:,0], cp[:,1], ':', color=linecolor, linewidth=lw)
            plt.plot(cp[:,0], cp[:,1], 'o', color=[0, 0.5, 1.])
null
2,060
import argparse
import traceback
import shutil
import logging
import yaml
import sys
import os
import torch
import numpy as np
import torch.utils.tensorboard as tb

from runners.diffusion import Diffusion

torch.set_printoptions(sci_mode=False)


def dict2namespace(config):
    """Recursively convert a (possibly nested) dict into an argparse.Namespace."""
    namespace = argparse.Namespace()
    for key, value in config.items():
        if isinstance(value, dict):
            # Nested dicts become nested namespaces.
            new_value = dict2namespace(value)
        else:
            new_value = value
        setattr(namespace, key, new_value)
    return namespace


def parse_args_and_config():
    """Parse CLI arguments, load the YAML config, and prepare run directories.

    Side effects: creates/overwrites the log, tensorboard, and image-sample
    folders (prompting the user unless --ni), configures the root logger,
    seeds torch/numpy, and attaches a SummaryWriter plus the chosen device to
    the returned config namespace.

    Returns:
        (args, new_config): the parsed argparse.Namespace and the config
        namespace built from the YAML file.
    """
    parser = argparse.ArgumentParser(description=globals()["__doc__"])

    parser.add_argument(
        "--config", type=str, required=True, help="Path to the config file"
    )
    parser.add_argument("--seed", type=int, default=1234, help="Random seed")
    parser.add_argument(
        "--exp", type=str, default="exp", help="Path for saving running related data."
    )
    parser.add_argument(
        "--doc",
        type=str,
        required=True,
        help="A string for documentation purpose. "
        "Will be the name of the log folder.",
    )
    parser.add_argument(
        "--comment", type=str, default="", help="A string for experiment comment"
    )
    parser.add_argument(
        "--verbose",
        type=str,
        default="info",
        help="Verbose level: info | debug | warning | critical",
    )
    parser.add_argument("--test", action="store_true", help="Whether to test the model")
    parser.add_argument(
        "--sample",
        action="store_true",
        help="Whether to produce samples from the model",
    )
    parser.add_argument("--fid", action="store_true")
    parser.add_argument("--interpolation", action="store_true")
    parser.add_argument(
        "--resume_training", action="store_true", help="Whether to resume training"
    )
    parser.add_argument(
        "-i",
        "--image_folder",
        type=str,
        default="images",
        help="The folder name of samples",
    )
    parser.add_argument(
        "--ni",
        action="store_true",
        help="No interaction. Suitable for Slurm Job launcher",
    )
    parser.add_argument("--use_pretrained", action="store_true")
    parser.add_argument(
        "--sample_type",
        type=str,
        default="generalized",
        help="sampling approach (generalized or ddpm_noisy)",
    )
    parser.add_argument(
        "--skip_type",
        type=str,
        default="uniform",
        help="skip according to (uniform or quadratic)",
    )
    parser.add_argument(
        "--timesteps", type=int, default=1000, help="number of steps involved"
    )
    parser.add_argument(
        "--eta",
        type=float,
        default=0.0,
        help="eta used to control the variances of sigma",
    )
    parser.add_argument("--sequence", action="store_true")

    args = parser.parse_args()
    args.log_path = os.path.join(args.exp, "logs", args.doc)

    # parse config file
    with open(os.path.join("configs", args.config), "r") as f:
        config = yaml.safe_load(f)
    new_config = dict2namespace(config)

    tb_path = os.path.join(args.exp, "tensorboard", args.doc)

    if not args.test and not args.sample:
        # Training mode: set up (or overwrite) the log folder and save the
        # resolved config next to the logs.
        if not args.resume_training:
            if os.path.exists(args.log_path):
                overwrite = False
                if args.ni:
                    # Non-interactive (e.g. Slurm): always overwrite.
                    overwrite = True
                else:
                    response = input("Folder already exists. Overwrite? (Y/N)")
                    if response.upper() == "Y":
                        overwrite = True

                if overwrite:
                    # NOTE(review): rmtree(tb_path) raises if tb_path does not
                    # exist, and the exists-check two lines below is dead code
                    # after that unconditional rmtree — confirm intent.
                    shutil.rmtree(args.log_path)
                    shutil.rmtree(tb_path)
                    os.makedirs(args.log_path)
                    if os.path.exists(tb_path):
                        shutil.rmtree(tb_path)
                else:
                    print("Folder exists. Program halted.")
                    sys.exit(0)
            else:
                os.makedirs(args.log_path)

            with open(os.path.join(args.log_path, "config.yml"), "w") as f:
                yaml.dump(new_config, f, default_flow_style=False)

        new_config.tb_logger = tb.SummaryWriter(log_dir=tb_path)
        # setup logger: stream + per-run file handler on the root logger
        level = getattr(logging, args.verbose.upper(), None)
        if not isinstance(level, int):
            raise ValueError("level {} not supported".format(args.verbose))

        handler1 = logging.StreamHandler()
        handler2 = logging.FileHandler(os.path.join(args.log_path, "stdout.txt"))
        formatter = logging.Formatter(
            "%(levelname)s - %(filename)s - %(asctime)s - %(message)s"
        )
        handler1.setFormatter(formatter)
        handler2.setFormatter(formatter)
        logger = logging.getLogger()
        logger.addHandler(handler1)
        logger.addHandler(handler2)
        logger.setLevel(level)

    else:
        # Test / sampling mode: stream-only logging (no stdout.txt file).
        level = getattr(logging, args.verbose.upper(), None)
        if not isinstance(level, int):
            raise ValueError("level {} not supported".format(args.verbose))

        handler1 = logging.StreamHandler()
        formatter = logging.Formatter(
            "%(levelname)s - %(filename)s - %(asctime)s - %(message)s"
        )
        handler1.setFormatter(formatter)
        logger = logging.getLogger()
        logger.addHandler(handler1)
        logger.setLevel(level)

        if args.sample:
            # Prepare the image output folder; FID/interpolation runs are
            # allowed to reuse an existing folder without prompting.
            os.makedirs(os.path.join(args.exp, "image_samples"), exist_ok=True)
            args.image_folder = os.path.join(
                args.exp, "image_samples", args.image_folder
            )
            if not os.path.exists(args.image_folder):
                os.makedirs(args.image_folder)
            else:
                if not (args.fid or args.interpolation):
                    overwrite = False
                    if args.ni:
                        overwrite = True
                    else:
                        response = input(
                            f"Image folder {args.image_folder} already exists. Overwrite? (Y/N)"
                        )
                        if response.upper() == "Y":
                            overwrite = True

                    if overwrite:
                        shutil.rmtree(args.image_folder)
                        os.makedirs(args.image_folder)
                    else:
                        print("Output image folder exists. Program halted.")
                        sys.exit(0)

    # add device
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    logging.info("Using device: {}".format(device))
    new_config.device = device

    # set random seed
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

    # cudnn autotuner: faster for fixed input shapes, at the cost of
    # run-to-run nondeterminism.
    torch.backends.cudnn.benchmark = True

    return args, new_config
null
2,061
import torch


def noise_estimation_loss(model,
                          x0: torch.Tensor,
                          t: torch.LongTensor,
                          e: torch.Tensor,
                          b: torch.Tensor,
                          keepdim=False):
    """Epsilon-prediction DDPM training loss.

    Diffuses ``x0`` to timestep ``t`` with noise ``e`` under the beta
    schedule ``b``, runs the model on the noised sample, and returns the
    squared error between the injected and the predicted noise.

    Args:
        model:   network taking (x_t, t) and predicting the noise.
        x0:      clean images, shape (N, C, H, W).
        t:       integer timesteps, shape (N,).
        e:       the Gaussian noise that was injected, same shape as x0.
        b:       per-step betas, shape (T,).
        keepdim: if True return the per-sample loss (N,), otherwise the
                 scalar batch mean.
    """
    # alpha_bar_t = prod_{s<=t} (1 - beta_s), broadcast over (C, H, W).
    alpha_bar = (1 - b).cumprod(dim=0)
    a = alpha_bar.index_select(0, t).view(-1, 1, 1, 1)
    # Forward diffusion: x_t = sqrt(a) * x0 + sqrt(1 - a) * e.
    x_t = a.sqrt() * x0 + (1.0 - a).sqrt() * e
    predicted = model(x_t, t.float())
    per_sample = (e - predicted).square().sum(dim=(1, 2, 3))
    return per_sample if keepdim else per_sample.mean(dim=0)
null
2,062
import os, hashlib

import requests
from tqdm import tqdm

# Pretrained diffusion checkpoint registry: download URL, cache-relative
# path, and expected MD5 per model name.
URL_MAP = {
    "cifar10": "https://heibox.uni-heidelberg.de/f/869980b53bf5416c8a28/?dl=1",
    "ema_cifar10": "https://heibox.uni-heidelberg.de/f/2e4f01e2d9ee49bab1d5/?dl=1",
    "lsun_bedroom": "https://heibox.uni-heidelberg.de/f/f179d4f21ebc4d43bbfe/?dl=1",
    "ema_lsun_bedroom": "https://heibox.uni-heidelberg.de/f/b95206528f384185889b/?dl=1",
    "lsun_cat": "https://heibox.uni-heidelberg.de/f/fac870bd988348eab88e/?dl=1",
    "ema_lsun_cat": "https://heibox.uni-heidelberg.de/f/0701aac3aa69457bbe34/?dl=1",
    "lsun_church": "https://heibox.uni-heidelberg.de/f/2711a6f712e34b06b9d8/?dl=1",
    "ema_lsun_church": "https://heibox.uni-heidelberg.de/f/44ccb50ef3c6436db52e/?dl=1",
}
CKPT_MAP = {
    "cifar10": "diffusion_cifar10_model/model-790000.ckpt",
    "ema_cifar10": "ema_diffusion_cifar10_model/model-790000.ckpt",
    "lsun_bedroom": "diffusion_lsun_bedroom_model/model-2388000.ckpt",
    "ema_lsun_bedroom": "ema_diffusion_lsun_bedroom_model/model-2388000.ckpt",
    "lsun_cat": "diffusion_lsun_cat_model/model-1761000.ckpt",
    "ema_lsun_cat": "ema_diffusion_lsun_cat_model/model-1761000.ckpt",
    "lsun_church": "diffusion_lsun_church_model/model-4432000.ckpt",
    "ema_lsun_church": "ema_diffusion_lsun_church_model/model-4432000.ckpt",
}
MD5_MAP = {
    "cifar10": "82ed3067fd1002f5cf4c339fb80c4669",
    "ema_cifar10": "1fa350b952534ae442b1d5235cce5cd3",
    "lsun_bedroom": "f70280ac0e08b8e696f42cb8e948ff1c",
    "ema_lsun_bedroom": "1921fa46b66a3665e450e42f36c2720f",
    "lsun_cat": "bbee0e7c3d7abfb6e2539eaf2fb9987b",
    "ema_lsun_cat": "646f23f4821f2459b8bafc57fd824558",
    "lsun_church": "eb619b8a5ab95ef80f94ce8a5488dae3",
    "ema_lsun_church": "fdc68a23938c2397caba4a260bc2445f",
}


def download(url, local_path, chunk_size=1024):
    """Stream ``url`` to ``local_path`` with a tqdm progress bar.

    Creates the parent directory if needed. The bar total comes from the
    Content-Length header (0 when the server does not send one).
    """
    os.makedirs(os.path.split(local_path)[0], exist_ok=True)
    with requests.get(url, stream=True) as r:
        total_size = int(r.headers.get("content-length", 0))
        with tqdm(total=total_size, unit="B", unit_scale=True) as pbar:
            with open(local_path, "wb") as f:
                for data in r.iter_content(chunk_size=chunk_size):
                    if data:
                        f.write(data)
                        # Fix: advance by the bytes actually received; the
                        # final chunk is usually shorter than chunk_size, so
                        # updating by chunk_size over-counted the total.
                        pbar.update(len(data))


def md5_hash(path):
    """Return the hex MD5 digest of the file at ``path``.

    Note: reads the whole file into memory; checkpoints here are a few
    hundred MB, which the original accepted — kept for identical behavior.
    """
    with open(path, "rb") as f:
        content = f.read()
    return hashlib.md5(content).hexdigest()


def get_ckpt_path(name, root=None, check=False):
    """Return a local path to checkpoint ``name``, downloading if necessary.

    Args:
        name:  registry key (``church_outdoor`` is aliased to ``church``).
        root:  cache root; defaults to $XDG_CACHE_HOME (or a hard-coded
               fallback) + "diffusion_models_converted".
        check: if True, re-download when the cached file's MD5 mismatches.

    Raises:
        AssertionError: unknown name, or MD5 mismatch after download.
    """
    if 'church_outdoor' in name:
        name = name.replace('church_outdoor', 'church')
    assert name in URL_MAP
    # Modify the path when necessary
    cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("/atlas/u/tsong/.cache"))
    root = (
        root if root is not None else os.path.join(cachedir, "diffusion_models_converted")
    )
    path = os.path.join(root, CKPT_MAP[name])
    if not os.path.exists(path) or (check and not md5_hash(path) == MD5_MAP[name]):
        print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path))
        download(URL_MAP[name], path)
        md5 = md5_hash(path)
        assert md5 == MD5_MAP[name], md5
    return path
null
2,063
import torch


def compute_alpha(beta, t):
    """Return alpha_bar_t = prod_{s<=t} (1 - beta_s), shaped (N, 1, 1, 1).

    A zero beta is prepended so that indexing with t + 1 maps t = -1 (the
    "step before the chain") to alpha_bar = 1.
    """
    beta = torch.cat([torch.zeros(1).to(beta.device), beta], dim=0)
    a = (1 - beta).cumprod(dim=0).index_select(0, t + 1).view(-1, 1, 1, 1)
    return a


def generalized_steps(x, seq, model, b, **kwargs):
    """DDIM generalized sampling over the timestep subsequence ``seq``.

    Args:
        x:     starting noise tensor, shape (N, C, H, W).
        seq:   increasing sequence of timesteps to visit (in reverse).
        model: epsilon-prediction network taking (x_t, t).
        b:     full beta schedule, shape (T,).
        eta:   (kwarg) stochasticity knob; 0 gives deterministic DDIM.

    Returns:
        (xs, x0_preds): trajectory of samples and per-step x0 estimates,
        both stored on CPU to limit device memory use.
    """
    with torch.no_grad():
        n = x.size(0)
        seq_next = [-1] + list(seq[:-1])
        x0_preds = []
        xs = [x]
        # Fix: run on the device of the input instead of hard-coded 'cuda',
        # so sampling also works on CPU-only machines (behavior on CUDA
        # inputs is unchanged).
        device = x.device
        for i, j in zip(reversed(seq), reversed(seq_next)):
            t = (torch.ones(n) * i).to(device)
            next_t = (torch.ones(n) * j).to(device)
            at = compute_alpha(b, t.long())
            at_next = compute_alpha(b, next_t.long())
            xt = xs[-1].to(device)
            et = model(xt, t)
            # Predicted clean image from the noise estimate.
            x0_t = (xt - et * (1 - at).sqrt()) / at.sqrt()
            x0_preds.append(x0_t.to('cpu'))
            # DDIM update: eta scales the fresh-noise term; c1 = 0 when
            # eta = 0 makes the step deterministic.
            c1 = (
                kwargs.get("eta", 0) * ((1 - at / at_next) * (1 - at_next) / (1 - at)).sqrt()
            )
            c2 = ((1 - at_next) - c1 ** 2).sqrt()
            xt_next = at_next.sqrt() * x0_t + c1 * torch.randn_like(x) + c2 * et
            xs.append(xt_next.to('cpu'))

    return xs, x0_preds
null
2,064
import torch


def compute_alpha(beta, t):
    """Return alpha_bar_t = prod_{s<=t} (1 - beta_s), shaped (N, 1, 1, 1).

    A zero beta is prepended so that indexing with t + 1 maps t = -1 to
    alpha_bar = 1.
    """
    beta = torch.cat([torch.zeros(1).to(beta.device), beta], dim=0)
    a = (1 - beta).cumprod(dim=0).index_select(0, t + 1).view(-1, 1, 1, 1)
    return a


def ddpm_steps(x, seq, model, b, **kwargs):
    """Ancestral DDPM sampling over the timestep subsequence ``seq``.

    Args:
        x:     starting noise tensor, shape (N, C, H, W).
        seq:   increasing sequence of timesteps to visit (in reverse).
        model: epsilon-prediction network taking (x_t, t).
        b:     full beta schedule, shape (T,).

    Returns:
        (xs, x0_preds): trajectory of samples and per-step clamped x0
        estimates, both stored on CPU to limit device memory use.
    """
    with torch.no_grad():
        n = x.size(0)
        seq_next = [-1] + list(seq[:-1])
        xs = [x]
        x0_preds = []
        betas = b
        # Fix: run on the device of the input instead of hard-coded 'cuda',
        # so sampling also works on CPU-only machines (behavior on CUDA
        # inputs is unchanged).
        device = x.device
        for i, j in zip(reversed(seq), reversed(seq_next)):
            t = (torch.ones(n) * i).to(device)
            next_t = (torch.ones(n) * j).to(device)
            at = compute_alpha(betas, t.long())
            atm1 = compute_alpha(betas, next_t.long())
            # Effective beta for this (possibly strided) step.
            beta_t = 1 - at / atm1
            x = xs[-1].to(device)

            output = model(x, t.float())
            e = output

            # Recover x0 from the noise estimate, clamped to the data range.
            x0_from_e = (1.0 / at).sqrt() * x - (1.0 / at - 1).sqrt() * e
            x0_from_e = torch.clamp(x0_from_e, -1, 1)
            x0_preds.append(x0_from_e.to('cpu'))
            # Posterior mean q(x_{t-1} | x_t, x0).
            mean_eps = (
                (atm1.sqrt() * beta_t) * x0_from_e + ((1 - beta_t).sqrt() * (1 - atm1)) * x
            ) / (1.0 - at)

            mean = mean_eps
            noise = torch.randn_like(x)
            # No noise is added at the final step (t == 0).
            mask = 1 - (t == 0).float()
            mask = mask.view(-1, 1, 1, 1)
            logvar = beta_t.log()
            sample = mean + mask * torch.exp(0.5 * logvar) * noise
            xs.append(sample.to('cpu'))
    return xs, x0_preds
null
2,065
import os
import logging
import time
import glob

import numpy as np
import tqdm
import torch
import torch.utils.data as data

from models.diffusion import Model
from models.ema import EMAHelper
from functions import get_optimizer
from functions.losses import loss_registry
from datasets import get_dataset, data_transform, inverse_data_transform
from functions.ckpt_util import get_ckpt_path

import torchvision.utils as tvu


def torch2hwcuint8(x, clip=False):
    """Rescale a tensor from the model range [-1, 1] to image range [0, 1].

    Args:
        x:    input tensor in [-1, 1].
        clip: when True, clamp out-of-range values to [-1, 1] first.
    """
    if clip:
        x = torch.clamp(x, -1, 1)
    return (x + 1.0) / 2.0
null
2,066
import os
import logging
import time
import glob

import numpy as np
import tqdm
import torch
import torch.utils.data as data

from models.diffusion import Model
from models.ema import EMAHelper
from functions import get_optimizer
from functions.losses import loss_registry
from datasets import get_dataset, data_transform, inverse_data_transform
from functions.ckpt_util import get_ckpt_path

import torchvision.utils as tvu


def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
    """Build the per-step noise schedule for a diffusion process.

    Args:
        beta_schedule: one of "quad", "linear", "const", "jsd", "sigmoid".
        beta_start:    first beta value (ignored by "const" and "jsd").
        beta_end:      last beta value.
        num_diffusion_timesteps: length T of the schedule.

    Returns:
        float64 numpy array of shape (T,).

    Raises:
        NotImplementedError: for an unknown schedule name.
    """

    def sigmoid(x):
        return 1 / (np.exp(-x) + 1)

    n = num_diffusion_timesteps
    if beta_schedule == "linear":
        betas = np.linspace(beta_start, beta_end, n, dtype=np.float64)
    elif beta_schedule == "quad":
        # Linear in sqrt(beta) space, then squared.
        sqrt_betas = np.linspace(
            beta_start ** 0.5, beta_end ** 0.5, n, dtype=np.float64
        )
        betas = sqrt_betas ** 2
    elif beta_schedule == "const":
        betas = beta_end * np.ones(n, dtype=np.float64)
    elif beta_schedule == "jsd":
        # 1/T, 1/(T-1), 1/(T-2), ..., 1
        betas = 1.0 / np.linspace(n, 1, n, dtype=np.float64)
    elif beta_schedule == "sigmoid":
        # Sigmoid ramp over [-6, 6], scaled into [beta_start, beta_end].
        betas = sigmoid(np.linspace(-6, 6, n)) * (beta_end - beta_start) + beta_start
    else:
        raise NotImplementedError(beta_schedule)
    assert betas.shape == (n,)
    return betas
null
2,067
import os
import os.path
import hashlib
import errno

from torch.utils.model_zoo import tqdm


def gen_bar_updater():
    """Return a ``reporthook`` callback for urllib.request.urlretrieve that
    drives a tqdm progress bar.

    NOTE(review): the tqdm bar is never explicitly closed — presumably fine
    for one-shot CLI downloads, but confirm if reused long-term.
    """
    pbar = tqdm(total=None)

    def bar_update(count, block_size, total_size):
        # urlretrieve passes (blocks transferred, block size, total size).
        if pbar.total is None and total_size:
            pbar.total = total_size
        progress_bytes = count * block_size
        pbar.update(progress_bytes - pbar.n)

    return bar_update


def check_integrity(fpath, md5=None):
    """Return True if ``fpath`` passes the optional MD5 check.

    With ``md5=None`` the check is skipped and True is returned even if the
    file does not exist; with an md5 given, a missing file returns False.
    """
    if md5 is None:
        return True
    if not os.path.isfile(fpath):
        return False
    md5o = hashlib.md5()
    with open(fpath, 'rb') as f:
        # read in 1MB chunks to bound memory use on large files
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            md5o.update(chunk)
    md5c = md5o.hexdigest()
    if md5c != md5:
        return False
    return True


def makedir_exist_ok(dirpath):
    """
    Python2 support for os.makedirs(.., exist_ok=True)
    """
    try:
        os.makedirs(dirpath)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            raise


# Fix: the original extract interleaved dataset prompt text with the code,
# which is not valid Python; the description is folded into the docstring.
def download_url(url, root, filename=None, md5=None):
    """Download a file from a url and place it in root.

    Args:
        url (str): URL to download file from
        root (str): Directory to place downloaded file in
        filename (str, optional): Name to save the file under.
            If None, use the basename of the URL
        md5 (str, optional): MD5 checksum of the download.
            If None, do not check
    """
    from six.moves import urllib
    root = os.path.expanduser(root)
    if not filename:
        filename = os.path.basename(url)
    fpath = os.path.join(root, filename)

    makedir_exist_ok(root)

    # downloads file
    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
    else:
        try:
            print('Downloading ' + url + ' to ' + fpath)
            urllib.request.urlretrieve(
                url, fpath,
                reporthook=gen_bar_updater()
            )
        except OSError:
            # Some hosts reject https; retry once over plain http.
            if url[:5] == 'https':
                url = url.replace('https:', 'http:')
                print('Failed download. Trying https -> http instead.'
                      ' Downloading ' + url + ' to ' + fpath)
                urllib.request.urlretrieve(
                    url, fpath,
                    reporthook=gen_bar_updater()
                )
Download a file from a url and place it in root. Args: url (str): URL to download file from root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the basename of the URL md5 (str, optional): MD5 checksum of the download. If None, do not check
2,068
import os
import os.path
import hashlib
import errno

from torch.utils.model_zoo import tqdm


def list_dir(root, prefix=False):
    """List all directories at a given root.

    Args:
        root (str): Path to directory whose folders need to be listed
        prefix (bool, optional): If true, prepends the path to each result,
            otherwise only returns the name of the directories found
    """
    root = os.path.expanduser(root)
    names = [
        entry for entry in os.listdir(root)
        if os.path.isdir(os.path.join(root, entry))
    ]
    if prefix is True:
        names = [os.path.join(root, name) for name in names]
    return names
List all directories at a given root Args: root (str): Path to directory whose folders need to be listed prefix (bool, optional): If true, prepends the path to each result, otherwise only returns the name of the directories found
2,069
import os
import os.path
import hashlib
import errno

from torch.utils.model_zoo import tqdm


def list_files(root, suffix, prefix=False):
    """List all files ending with a suffix at a given root.

    Args:
        root (str): Path to directory whose folders need to be listed
        suffix (str or tuple): Suffix of the files to match, e.g. '.png' or
            ('.jpg', '.png'). It uses the Python "str.endswith" method and is
            passed directly
        prefix (bool, optional): If true, prepends the path to each result,
            otherwise only returns the name of the files found
    """
    root = os.path.expanduser(root)
    names = [
        entry for entry in os.listdir(root)
        if os.path.isfile(os.path.join(root, entry)) and entry.endswith(suffix)
    ]
    if prefix is True:
        names = [os.path.join(root, name) for name in names]
    return names
List all files ending with a suffix at a given root Args: root (str): Path to directory whose folders need to be listed suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png'). It uses the Python "str.endswith" method and is passed directly prefix (bool, optional): If true, prepends the path to each result, otherwise only returns the name of the files found
2,070
import os import os.path import hashlib import errno from torch.utils.model_zoo import tqdm def check_integrity(fpath, md5=None): if md5 is None: return True if not os.path.isfile(fpath): return False md5o = hashlib.md5() with open(fpath, 'rb') as f: # read in 1MB chunks for chunk in iter(lambda: f.read(1024 * 1024), b''): md5o.update(chunk) md5c = md5o.hexdigest() if md5c != md5: return False return True def makedir_exist_ok(dirpath): """ Python2 support for os.makedirs(.., exist_ok=True) """ try: os.makedirs(dirpath) except OSError as e: if e.errno == errno.EEXIST: pass else: raise def _get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None def _save_response_content(response, destination, chunk_size=32768): with open(destination, "wb") as f: pbar = tqdm(total=None) progress = 0 for chunk in response.iter_content(chunk_size): if chunk: # filter out keep-alive new chunks f.write(chunk) progress += len(chunk) pbar.update(progress - pbar.n) pbar.close() The provided code snippet includes necessary dependencies for implementing the `download_file_from_google_drive` function. Write a Python function `def download_file_from_google_drive(file_id, root, filename=None, md5=None)` to solve the following problem: Download a Google Drive file from and place it in root. Args: file_id (str): id of file to be downloaded root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the id of the file. md5 (str, optional): MD5 checksum of the download. If None, do not check Here is the function: def download_file_from_google_drive(file_id, root, filename=None, md5=None): """Download a Google Drive file from and place it in root. Args: file_id (str): id of file to be downloaded root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the id of the file. 
md5 (str, optional): MD5 checksum of the download. If None, do not check """ # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url import requests url = "https://docs.google.com/uc?export=download" root = os.path.expanduser(root) if not filename: filename = file_id fpath = os.path.join(root, filename) makedir_exist_ok(root) if os.path.isfile(fpath) and check_integrity(fpath, md5): print('Using downloaded and verified file: ' + fpath) else: session = requests.Session() response = session.get(url, params={'id': file_id}, stream=True) token = _get_confirm_token(response) if token: params = {'id': file_id, 'confirm': token} response = session.get(url, params=params, stream=True) _save_response_content(response, fpath)
Download a Google Drive file from and place it in root. Args: file_id (str): id of file to be downloaded root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the id of the file. md5 (str, optional): MD5 checksum of the download. If None, do not check
2,071
import math
import torch
import torch.nn as nn


def get_timestep_embedding(timesteps, embedding_dim):
    """
    This matches the implementation in Denoising Diffusion Probabilistic
    Models: From Fairseq. Build sinusoidal embeddings. This matches the
    implementation in tensor2tensor, but differs slightly from the
    description in Section 3.5 of "Attention Is All You Need".
    """
    # Expect a 1-D batch of integer timesteps.
    assert len(timesteps.shape) == 1

    half_dim = embedding_dim // 2
    # Geometric frequency ladder: exp(-log(10000) * k / (half_dim - 1)).
    scale = math.log(10000) / (half_dim - 1)
    freqs = torch.exp(-scale * torch.arange(half_dim, dtype=torch.float32))
    freqs = freqs.to(device=timesteps.device)
    # Outer product (N,) x (half_dim,) -> (N, half_dim) phase arguments.
    angles = timesteps.float()[:, None] * freqs[None, :]
    emb = torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad the last channel for odd dims
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
This matches the implementation in Denoising Diffusion Probabilistic Models: From Fairseq. Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need".
2,072
import math
import torch
import torch.nn as nn


def nonlinearity(x):
    """Swish/SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
null
2,073
import math
import torch
import torch.nn as nn


def Normalize(in_channels):
    """Build the normalization layer used throughout the model: GroupNorm
    with 32 groups, learnable affine parameters, and a small epsilon.

    Args:
        in_channels: number of feature channels to normalize over.
    """
    return torch.nn.GroupNorm(
        num_groups=32,
        num_channels=in_channels,
        eps=1e-6,
        affine=True,
    )
null
2,074
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import suppress
from itertools import cycle
from json import load
from logging import basicConfig, getLogger, shutdown
from math import log2, trunc
from multiprocessing import RawValue
from os import urandom as randbytes
from pathlib import Path
from re import compile
from random import choice as randchoice
from socket import (AF_INET, IP_HDRINCL, IPPROTO_IP, IPPROTO_TCP, IPPROTO_UDP,
                    SOCK_DGRAM, IPPROTO_ICMP, SOCK_RAW, SOCK_STREAM, TCP_NODELAY,
                    gethostbyname, gethostname, socket)
from ssl import CERT_NONE, SSLContext, create_default_context
from struct import pack as data_pack
from subprocess import run, PIPE
from sys import argv
from sys import exit as _exit
from threading import Event, Thread
from time import sleep, time
from typing import Any, List, Set, Tuple
from urllib import parse
from uuid import UUID, uuid4

from PyRoxy import Proxy, ProxyChecker, ProxyType, ProxyUtiles
from PyRoxy import Tools as ProxyTools
from certifi import where
from cloudscraper import create_scraper
from dns import resolver
from icmplib import ping
from impacket.ImpactPacket import IP, TCP, UDP, Data, ICMP
from psutil import cpu_percent, net_io_counters, process_iter, virtual_memory
from requests import Response, Session, exceptions, get, cookies
from yarl import URL
from base64 import b64encode

logger = getLogger("MHDDoS")
logger.setLevel("INFO")

# NOTE(review): `__dir__` is not defined in this extract — presumably a
# Path(__file__).parent assigned elsewhere in the original file; confirm.
with open(__dir__ / "config.json") as f:
    con = load(f)


class bcolors:
    # ANSI escape sequences used to colour terminal log output.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    RESET = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


def exit(*message):
    """Log an optional error message, flush the logging system, and hard-exit
    the process with status 1.

    Shadows the builtin ``exit`` — apparently deliberate in this codebase.
    """
    if message:
        logger.error(bcolors.FAIL + " ".join(message) + bcolors.RESET)
    shutdown()
    _exit(1)


class ProxyManager:
    # NOTE(review): these methods take no `self` and carry no @staticmethod
    # decorator; they are only ever invoked through the class
    # (ProxyManager.download(...)), which works in Python 3.

    def DownloadFromConfig(cf, Proxy_type: int) -> Set[Proxy]:
        # Collect proxies concurrently from every configured provider whose
        # type matches Proxy_type (0 means "all types").
        providrs = [
            provider for provider in cf["proxy-providers"]
            if provider["type"] == Proxy_type or Proxy_type == 0
        ]
        logger.info(
            f"{bcolors.WARNING}Downloading Proxies from {bcolors.OKBLUE}%d{bcolors.WARNING} Providers{bcolors.RESET}" % len(
                providrs))
        proxes: Set[Proxy] = set()

        with ThreadPoolExecutor(len(providrs)) as executor:
            future_to_download = {
                executor.submit(
                    ProxyManager.download, provider,
                    ProxyType.stringToProxyType(str(provider["type"])))
                for provider in providrs
            }
            for future in as_completed(future_to_download):
                for pro in future.result():
                    proxes.add(pro)
        return proxes

    def download(provider, proxy_type: ProxyType) -> Set[Proxy]:
        # Fetch one provider URL and parse every ip:port line into a Proxy.
        # Network errors are silently suppressed (best-effort download).
        logger.debug(
            f"{bcolors.WARNING}Proxies from (URL: {bcolors.OKBLUE}%s{bcolors.WARNING}, Type: {bcolors.OKBLUE}%s{bcolors.WARNING}, Timeout: {bcolors.OKBLUE}%d{bcolors.WARNING}){bcolors.RESET}" %
            (provider["url"], proxy_type.name, provider["timeout"]))
        proxes: Set[Proxy] = set()

        with suppress(TimeoutError, exceptions.ConnectionError,
                      exceptions.ReadTimeout):
            data = get(provider["url"], timeout=provider["timeout"]).text
            try:
                for proxy in ProxyUtiles.parseAllIPPort(
                        data.splitlines(), proxy_type):
                    proxes.add(proxy)
            except Exception as e:
                logger.error(f'Download Proxy Error: {(e.__str__() or e.__repr__())}')
        return proxes


def handleProxyList(con, proxy_li, proxy_ty, url=None):
    """Load (or download, check, and cache) the proxy list at path `proxy_li`.

    proxy_ty: 4/5/1 = socks4/socks5/http, 0 = all, 6 = pick one at random.
    Returns a set of proxies, or None to signal "flood without proxies".

    NOTE(review): `threads` (used in the checkAll call) is not defined in this
    extract — presumably a module-level global parsed from the CLI; confirm.
    """
    if proxy_ty not in {4, 5, 1, 0, 6}:
        exit("Socks Type Not Found [4, 5, 1, 0, 6]")
    if proxy_ty == 6:
        proxy_ty = randchoice([4, 5, 1])
    if not proxy_li.exists():
        # No cached list: download from all providers, health-check, and
        # persist the survivors to the file for next time.
        logger.warning(
            f"{bcolors.WARNING}The file doesn't exist, creating files and downloading proxies.{bcolors.RESET}")
        proxy_li.parent.mkdir(parents=True, exist_ok=True)
        with proxy_li.open("w") as wr:
            Proxies: Set[Proxy] = ProxyManager.DownloadFromConfig(con, proxy_ty)
            logger.info(
                f"{bcolors.OKBLUE}{len(Proxies):,}{bcolors.WARNING} Proxies are getting checked, this may take awhile{bcolors.RESET}!"
            )
            Proxies = ProxyChecker.checkAll(
                Proxies, timeout=5, threads=threads,
                url=url.human_repr() if url else "http://httpbin.org/get",
            )

            if not Proxies:
                exit(
                    "Proxy Check failed, Your network may be the problem"
                    " | The target may not be available."
                )
            stringBuilder = ""
            for proxy in Proxies:
                stringBuilder += (proxy.__str__() + "\n")
            wr.write(stringBuilder)

    proxies = ProxyUtiles.readFromFile(proxy_li)
    if proxies:
        logger.info(f"{bcolors.WARNING}Proxy Count: {bcolors.OKBLUE}{len(proxies):,}{bcolors.RESET}")
    else:
        logger.info(
            f"{bcolors.WARNING}Empty Proxy File, running flood without proxy{bcolors.RESET}")
        proxies = None

    return proxies
null
2,075
import os
import re
import shutil
import sys

from setuptools import find_namespace_packages, setup

with open('README.rst') as readme:
    long_description = readme.read()

with open('requirements/base.txt') as fh:
    requirements = [r for r in fh.read().split('\n') if not r.startswith('#')]


def get_version(package):
    """
    Return package version as listed in `__version__` in `init.py`.
    """
    # Fix: the original did open(...).read() without closing the handle;
    # use a context manager so the file is always released.
    with open(os.path.join(package, '__init__.py')) as init_file:
        init_py = init_file.read()
    return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
                     init_py, re.MULTILINE).group(1)
Return package version as listed in `__version__` in `init.py`.
2,076
from docutils.nodes import Text
from sphinx.util import logging

# NOTE(review): `logger` is used below but never defined in this extract —
# presumably `logger = logging.getLogger(__name__)` exists elsewhere in the
# original file; confirm before reuse.


def default_role_error(
    name, rawtext, text, lineno, inliner, options=None, content=None
):
    """Sphinx role handler that flags accidental use of the default role.

    Emits a build warning (pointing at the offending source location) when a
    `single backtick` span is encountered, then renders the span as plain
    text so the build can continue.
    """
    logger.warning(
        (
            f"Default role used (`single backticks`): {rawtext}. Did you mean to use "
            "two backticks for ``code``, or miss an underscore for a `link`_ ?"
        ),
        location=(inliner.document.current_source, lineno),
    )
    # Role handlers return (nodes, system_messages).
    return [Text(text)], []


def setup(app):
    """Sphinx extension entry point: install the warning role as this
    project's default role handler."""
    app.add_role("default-role-error", default_role_error)
    return {"parallel_read_safe": True}
null
2,077
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView

from drf_spectacular.plumbing import get_relative_url, set_query_parameters
from drf_spectacular.settings import spectacular_settings
from drf_spectacular.utils import extend_schema
from drf_spectacular.views import AUTHENTICATION_CLASSES


def set_query_parameters(url, **kwargs) -> str:
    """NOTE(review): body truncated in this extract. Shadows the identically
    named import from drf_spectacular.plumbing above — confirm which one the
    original file intends to use."""


def get_relative_url(url: str) -> str:
    """NOTE(review): body truncated in this extract. Shadows the identically
    named import from drf_spectacular.plumbing above."""


# NOTE(review): takes `self` — this is a method of a schema-UI view class
# whose `class` statement lies outside this extract (it reads self.url and
# self.url_name); it is not meant to be called as a free function.
def _get_schema_url(self, request):
    """Resolve the schema endpoint URL for the current request and propagate
    the `lang` and `version` query parameters onto it."""
    schema_url = self.url or get_relative_url(reverse(self.url_name, request=request))
    return set_query_parameters(
        url=schema_url,
        lang=request.GET.get('lang'),
        version=request.GET.get('version')
    )
null
2,078
from drf_spectacular.contrib.rest_polymorphic import PolymorphicSerializerExtension
from drf_spectacular.plumbing import ResolvedComponent
from drf_spectacular.serializers import PolymorphicProxySerializerExtension
from drf_spectacular.settings import spectacular_settings


def rollup_properties(component, resolved_sub_serializers):
    """Hoist fields shared by every polymorphic sub-serializer into the base
    ``component`` schema, and rewrite each sub-schema as
    ``allOf: [base ref, remaining fields]``.

    Mutates ``component.schema`` (dropping its ``oneOf``) and each entry of
    ``resolved_sub_serializers`` in place; returns None.
    """
    # rollup already happened (spectacular bug and normally not needed)
    if any('allOf' in r.schema for r in resolved_sub_serializers):
        return

    # Fields present in EVERY sub-serializer's properties.
    all_field_sets = [
        set(list(r.schema['properties'])) for r in resolved_sub_serializers
    ]
    common_fields = all_field_sets[0].intersection(*all_field_sets[1:])
    common_schema = {
        'properties': {},
        'required': set(),
    }

    # substitute sub serializers' common fields with base class
    for r in resolved_sub_serializers:
        for cf in sorted(common_fields):
            if cf in r.schema['properties']:
                # Note: iterating all subs means later sub-serializers
                # overwrite earlier ones as the source of the hoisted field
                # definition; a field is 'required' on the base if ANY sub
                # required it.
                common_schema['properties'][cf] = r.schema['properties'][cf]
                del r.schema['properties'][cf]
                if cf in r.schema.get('required', []):
                    common_schema['required'].add(cf)
        # Remaining (non-common) fields stay on the sub-schema, combined
        # with a $ref to the rolled-up base component.
        r.schema = {'allOf': [component.ref, r.schema]}

    # modify regular schema for field rollup
    del component.schema['oneOf']
    component.schema['properties'] = common_schema['properties']
    if common_schema['required']:
        component.schema['required'] = sorted(common_schema['required'])
null
2,079
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def get_class(obj) -> type: return obj if inspect.isclass(obj) else obj.__class__ def is_serializer(obj, strict=False) -> TypeGuard[_SerializerType]: from drf_spectacular.extensions import OpenApiSerializerExtension return ( isinstance(force_instance(obj), serializers.BaseSerializer) or (bool(OpenApiSerializerExtension.get_match(obj)) and not strict) ) def get_list_serializer(obj: Any): return force_instance(obj) if is_list_serializer(obj) else get_class(obj)(many=True, context=obj.context) def is_list_serializer_customized(obj) -> bool: return ( is_serializer(obj, strict=True) and get_class(get_list_serializer(obj)).to_representation # type: ignore is not serializers.ListSerializer.to_representation )
null
2,080
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def force_instance(serializer_or_field): def is_serializer(obj, strict=False) -> TypeGuard[_SerializerType]: _FieldType = Union[Field, Type[Field]] def is_field(obj: Any) -> TypeGuard[_FieldType]: # make sure obj is a serializer field and nothing else. # guard against serializers because BaseSerializer(Field) return isinstance(force_instance(obj), fields.Field) and not is_serializer(obj)
null
2,081
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) spectacular_settings = SpectacularSettings( user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}), # type: ignore defaults=SPECTACULAR_DEFAULTS, # type: ignore import_strings=IMPORT_STRINGS, ) def is_patched_serializer(serializer, direction) -> bool: return bool( spectacular_settings.COMPONENT_SPLIT_PATCH and getattr(serializer, 'partial', None) and not getattr(serializer, 'read_only', None) and not (spectacular_settings.COMPONENT_SPLIT_REQUEST and direction == 'response') )
null
2,082
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def is_trivial_string_variation(a: str, b: str) -> bool: a = (a or '').strip().lower().replace(' ', '_').replace('-', '_') b = (b or '').strip().lower().replace(' ', '_').replace('-', '_') return a == b
null
2,083
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def is_basic_serializer(obj: Any) -> TypeGuard[_SerializerType]: return is_serializer(obj) and not is_list_serializer(obj) def assert_basic_serializer(serializer) -> None: assert is_basic_serializer(serializer), ( f'internal assumption violated because we expected a basic serializer here and ' f'instead got a "{serializer}". This may be the result of another app doing ' f'some unexpected magic or an invalid internal call. Feel free to report this ' f'as a bug at https://github.com/tfranzel/drf-spectacular/issues' )
null
2,084
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def get_lib_doc_excludes(): # do not import on package level due to potential import recursion when loading # extensions as recommended: USER's settings.py -> USER EXTENSIONS -> extensions.py # -> plumbing.py -> DRF views -> DRF DefaultSchema -> openapi.py - plumbing.py -> Loop from rest_framework import generics, views, viewsets return [ object, dict, views.APIView, *[getattr(serializers, c) for c in dir(serializers) if c.endswith('Serializer')], *[getattr(viewsets, c) for c in dir(viewsets) if c.endswith('ViewSet')], *[getattr(generics, c) for c in dir(generics) if c.endswith('APIView')], *[getattr(mixins, c) for c in dir(mixins) if c.endswith('Mixin')], ]
null
2,085
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def warn(msg: str, delayed: Any = None) -> None: if delayed: warnings = get_override(delayed, 'warnings', []) warnings.append(msg) set_override(delayed, 'warnings', warnings) else: GENERATOR_STATS.emit(msg, 'warning') The provided code snippet includes necessary dependencies for implementing the `get_view_model` function. Write a Python function `def get_view_model(view, emit_warnings=True)` to solve the following problem: obtain model from view via view's queryset. try safer view attribute first before going through get_queryset(), which may perform arbitrary operations. Here is the function: def get_view_model(view, emit_warnings=True): """ obtain model from view via view's queryset. try safer view attribute first before going through get_queryset(), which may perform arbitrary operations. """ model = getattr(getattr(view, 'queryset', None), 'model', None) if model is not None: return model try: return view.get_queryset().model except Exception as exc: if emit_warnings: warn( f'Failed to obtain model through view\'s queryset due to raised exception. ' f'Prevent this either by setting "queryset = Model.objects.none()" on the ' f'view, checking for "getattr(self, "swagger_fake_view", False)" in ' f'get_queryset() or by simply using @extend_schema. (Exception: {exc})' )
obtain model from view via view's queryset. try safer view attribute first before going through get_queryset(), which may perform arbitrary operations.
2,086
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) if hasattr(types, 'UnionType'): UNION_TYPES: Tuple[Any, ...] = (Union, types.UnionType) else: UNION_TYPES = (Union,) def error(msg: str, delayed: Any = None) -> None: if delayed: errors = get_override(delayed, 'errors', []) errors.append(msg) set_override(delayed, 'errors', errors) else: GENERATOR_STATS.emit(msg, 'error') spectacular_settings = SpectacularSettings( user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}), # type: ignore defaults=SPECTACULAR_DEFAULTS, # type: ignore import_strings=IMPORT_STRINGS, ) def get_manager(model): if not hasattr(model, spectacular_settings.DEFAULT_QUERY_MANAGER): error( f'Failed to obtain queryset from model "{model.__name__}" because manager ' f'"{spectacular_settings.DEFAULT_QUERY_MANAGER}" was not found. You may ' f'need to change the DEFAULT_QUERY_MANAGER setting. bailing.' ) return getattr(model, spectacular_settings.DEFAULT_QUERY_MANAGER)
null
2,087
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) _SchemaType = Dict[str, Any] def build_media_type_object(schema, examples=None, encoding=None) -> _SchemaType: media_type_object = {'schema': schema} if examples: media_type_object['examples'] = examples if encoding: media_type_object['encoding'] = encoding return media_type_object
null
2,088
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) _SchemaType = Dict[str, Any] class OpenApiExample(OpenApiSchemaBase): """ Helper class to document a API parameter / request body / response body with a concrete example value. It is recommended to provide a singular example value, since pagination and list responses are handled by drf-spectacular. The example will be attached to the operation object where appropriate, i.e. where the given ``media_type``, ``status_code`` and modifiers match. Example that do not match any scenario are ignored. - media_type will default to 'application/json' unless implicitly specified through :class:`.OpenApiResponse` - status_codes will default to [200, 201] unless implicitly specified through :class:`.OpenApiResponse` """ def __init__( self, name: str, value: Any = empty, external_value: str = '', summary: _StrOrPromise = '', description: _StrOrPromise = '', request_only: bool = False, response_only: bool = False, parameter_only: Optional[Tuple[str, _ParameterLocationType]] = None, media_type: Optional[str] = None, status_codes: Optional[Sequence[Union[str, int]]] = None, ): self.name = name self.summary = summary self.description = description self.value = value self.external_value = external_value self.request_only = request_only self.response_only = response_only self.parameter_only = parameter_only self.media_type = media_type self.status_codes = status_codes def build_examples_list(examples: Sequence[OpenApiExample]) -> _SchemaType: schema = {} for example in examples: normalized_name = inflection.camelize(example.name.replace(' ', '_')) sub_schema = {} if example.value is not empty: sub_schema['value'] = example.value if example.external_value: sub_schema['externalValue'] = example.external_value if example.summary: sub_schema['summary'] = example.summary elif normalized_name != example.name: sub_schema['summary'] = example.name if example.description: 
sub_schema['description'] = example.description schema[normalized_name] = sub_schema return schema
null
2,089
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def build_choice_description_list(choices) -> str: def list_hash(lst: Any) -> str: spectacular_settings = SpectacularSettings( user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}), # type: ignore defaults=SPECTACULAR_DEFAULTS, # type: ignore import_strings=IMPORT_STRINGS, ) _SchemaType = Dict[str, Any] def build_choice_field(field) -> _SchemaType: choices = list(OrderedDict.fromkeys(field.choices)) # preserve order and remove duplicates if all(isinstance(choice, bool) for choice in choices): type: Optional[str] = 'boolean' elif all(isinstance(choice, int) for choice in choices): type = 'integer' elif all(isinstance(choice, (int, float, Decimal)) for choice in choices): # `number` includes `integer` # Ref: https://tools.ietf.org/html/draft-wright-json-schema-validation-00#section-5.21 type = 'number' elif all(isinstance(choice, str) for choice in choices): type = 'string' else: type = None if field.allow_blank and '' not in choices: choices.append('') if field.allow_null and None not in choices: choices.append(None) schema: _SchemaType = { # The value of `enum` keyword MUST be an array and SHOULD be unique. # Ref: https://tools.ietf.org/html/draft-wright-json-schema-validation-00#section-5.20 'enum': choices } # If We figured out `type` then and only then we should set it. It must be a string. # Ref: https://swagger.io/docs/specification/data-models/data-types/#mixed-type # It is optional but it can not be null. # Ref: https://tools.ietf.org/html/draft-wright-json-schema-validation-00#section-5.21 if type: schema['type'] = type if spectacular_settings.ENUM_GENERATE_CHOICE_DESCRIPTION: schema['description'] = build_choice_description_list(field.choices.items()) schema['x-spec-enum-id'] = list_hash([(k, v) for k, v in field.choices.items() if k not in ('', None)]) return schema
null
2,090
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) The provided code snippet includes necessary dependencies for implementing the `build_bearer_security_scheme_object` function. Write a Python function `def build_bearer_security_scheme_object(header_name, token_prefix, bearer_format=None)` to solve the following problem: Either build a bearer scheme or a fallback due to OpenAPI 3.0.3 limitations Here is the function: def build_bearer_security_scheme_object(header_name, token_prefix, bearer_format=None): """ Either build a bearer scheme or a fallback due to OpenAPI 3.0.3 limitations """ # normalize Django header quirks if header_name.startswith('HTTP_'): header_name = header_name[5:] header_name = header_name.replace('_', '-').capitalize() if token_prefix == 'Bearer' and header_name == 'Authorization': return { 'type': 'http', 'scheme': 'bearer', **({'bearerFormat': bearer_format} if bearer_format else {}), } else: return { 'type': 'apiKey', 'in': 'header', 'name': header_name, 'description': _( 'Token-based authentication with required prefix "%s"' ) % token_prefix }
Either build a bearer scheme or a fallback due to OpenAPI 3.0.3 limitations
2,091
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def sanitize_specification_extensions(extensions): # https://spec.openapis.org/oas/v3.0.3#specificationExtensions output = {} for key, value in extensions.items(): if not re.match(r'^x-', key): warn(f'invalid extension {key!r}. vendor extensions must start with "x-"') else: output[key] = value return output spectacular_settings = SpectacularSettings( user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}), # type: ignore defaults=SPECTACULAR_DEFAULTS, # type: ignore import_strings=IMPORT_STRINGS, ) _SchemaType = Dict[str, Any] def build_root_object(paths, components, webhooks, version) -> _SchemaType: settings = spectacular_settings if settings.VERSION and version: version = f'{settings.VERSION} ({version})' else: version = settings.VERSION or version or '' root = { 'openapi': settings.OAS_VERSION, 'info': { 'title': settings.TITLE, 'version': version, **sanitize_specification_extensions(settings.EXTENSIONS_INFO), }, 'paths': {**paths, **settings.APPEND_PATHS}, 'components': components, **sanitize_specification_extensions(settings.EXTENSIONS_ROOT), } if settings.DESCRIPTION: root['info']['description'] = settings.DESCRIPTION if settings.TOS: root['info']['termsOfService'] = settings.TOS if settings.CONTACT: root['info']['contact'] = settings.CONTACT if settings.LICENSE: root['info']['license'] = settings.LICENSE if settings.SERVERS: root['servers'] = settings.SERVERS if settings.TAGS: root['tags'] = settings.TAGS if settings.EXTERNAL_DOCS: root['externalDocs'] = settings.EXTERNAL_DOCS if webhooks: root['webhooks'] = webhooks return root
null
2,092
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def safe_ref(schema: _SchemaType) -> _SchemaType: """ ensure that $ref has its own context and does not remove potential sibling entries when $ref is substituted. also remove useless singular "allOf" . """ if '$ref' in schema and len(schema) > 1: return {'allOf': [{'$ref': schema.pop('$ref')}], **schema} if 'allOf' in schema and len(schema) == 1 and len(schema['allOf']) == 1: return schema['allOf'][0] return schema spectacular_settings = SpectacularSettings( user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}), # type: ignore defaults=SPECTACULAR_DEFAULTS, # type: ignore import_strings=IMPORT_STRINGS, ) _SchemaType = Dict[str, Any] def append_meta(schema: _SchemaType, meta: _SchemaType) -> _SchemaType: if spectacular_settings.OAS_VERSION.startswith('3.1'): schema_nullable = meta.pop('nullable', None) meta_nullable = schema.pop('nullable', None) if schema_nullable or meta_nullable: if 'type' in schema: schema['type'] = [schema['type'], 'null'] elif '$ref' in schema: schema = {'oneOf': [schema, {'type': 'null'}]} elif len(schema) == 1 and 'oneOf' in schema: schema['oneOf'].append({'type': 'null'}) elif not schema: schema = {'oneOf': [{}, {'type': 'null'}]} else: assert False, 'Invalid nullable case' # pragma: no cover # these two aspects were merged in OpenAPI 3.1 if "exclusiveMinimum" in schema and "minimum" in schema: schema["exclusiveMinimum"] = schema.pop("minimum") if "exclusiveMaximum" in schema and "maximum" in schema: schema["exclusiveMaximum"] = schema.pop("maximum") return safe_ref({**schema, **meta})
null
2,093
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) class UnableToProceedError(Exception): pass def _follow_field_source(model, path: List[str]): """ navigate through root model via given navigation path. supports forward/reverse relations. """ field_or_property = getattr(model, path[0], None) if len(path) == 1: # end of traversal if isinstance(field_or_property, property): return field_or_property.fget elif isinstance(field_or_property, CACHED_PROPERTY_FUNCS): return field_or_property.func elif callable(field_or_property): return field_or_property elif isinstance(field_or_property, ManyToManyDescriptor): if field_or_property.reverse: return field_or_property.rel.target_field # m2m reverse else: return field_or_property.field.target_field # m2m forward elif isinstance(field_or_property, ReverseOneToOneDescriptor): return field_or_property.related.target_field # o2o reverse elif isinstance(field_or_property, ReverseManyToOneDescriptor): return field_or_property.rel.target_field # foreign reverse elif isinstance(field_or_property, ForwardManyToOneDescriptor): return field_or_property.field.target_field # o2o & foreign forward else: field = model._meta.get_field(path[0]) if isinstance(field, ForeignObjectRel): # case only occurs when relations are traversed in reverse and # not via the related_name (default: X_set) but the model name. 
return field.target_field else: return field else: if ( isinstance(field_or_property, (property,) + CACHED_PROPERTY_FUNCS) or callable(field_or_property) ): if isinstance(field_or_property, property): target_model = _follow_return_type(field_or_property.fget) elif isinstance(field_or_property, CACHED_PROPERTY_FUNCS): target_model = _follow_return_type(field_or_property.func) else: target_model = _follow_return_type(field_or_property) if not target_model: raise UnableToProceedError( f'could not follow field source through intermediate property "{path[0]}" ' f'on model {model}. Please add a type hint on the model\'s property/function ' f'to enable traversal of the source path "{".".join(path)}".' ) return _follow_field_source(target_model, path[1:]) else: target_model = model._meta.get_field(path[0]).related_model return _follow_field_source(target_model, path[1:]) def warn(msg: str, delayed: Any = None) -> None: if delayed: warnings = get_override(delayed, 'warnings', []) warnings.append(msg) set_override(delayed, 'warnings', warnings) else: GENERATOR_STATS.emit(msg, 'warning') The provided code snippet includes necessary dependencies for implementing the `follow_field_source` function. Write a Python function `def follow_field_source(model, path, default=None, emit_warnings=True)` to solve the following problem: a model traversal chain "foreignkey.foreignkey.value" can either end with an actual model field instance "value" or a model property function named "value". differentiate the cases. :return: models.Field or function object Here is the function: def follow_field_source(model, path, default=None, emit_warnings=True): """ a model traversal chain "foreignkey.foreignkey.value" can either end with an actual model field instance "value" or a model property function named "value". differentiate the cases. 
:return: models.Field or function object """ try: return _follow_field_source(model, path) except UnableToProceedError as e: if emit_warnings: warn(e) except Exception as exc: if emit_warnings: warn( f'could not resolve field on model {model} with path "{".".join(path)}". ' f'This is likely a custom field that does some unknown magic. Maybe ' f'consider annotating the field/property? Defaulting to "string". (Exception: {exc})' ) def dummy_property(obj) -> str: # type: ignore pass # pragma: no cover return default or dummy_property
a model traversal chain "foreignkey.foreignkey.value" can either end with an actual model field instance "value" or a model property function named "value". differentiate the cases. :return: models.Field or function object
2,094
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) The provided code snippet includes necessary dependencies for implementing the `follow_model_field_lookup` function. Write a Python function `def follow_model_field_lookup(model, lookup)` to solve the following problem: Follow a model lookup `foreignkey__foreignkey__field` in the same way that Django QuerySet.filter() does, returning the final models.Field. Here is the function: def follow_model_field_lookup(model, lookup): """ Follow a model lookup `foreignkey__foreignkey__field` in the same way that Django QuerySet.filter() does, returning the final models.Field. """ query = Query(model) lookup_splitted = lookup.split(LOOKUP_SEP) _, field, _, _ = query.names_to_path(lookup_splitted, query.get_meta()) return field
Follow a model lookup `foreignkey__foreignkey__field` in the same way that Django QuerySet.filter() does, returning the final models.Field.
2,095
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) The provided code snippet includes necessary dependencies for implementing the `alpha_operation_sorter` function. Write a Python function `def alpha_operation_sorter(endpoint)` to solve the following problem: sort endpoints first alphanumerically by path, then by method order Here is the function: def alpha_operation_sorter(endpoint): """ sort endpoints first alphanumerically by path, then by method order """ path, path_regex, method, callback = endpoint method_priority = { 'GET': 0, 'POST': 1, 'PUT': 2, 'PATCH': 3, 'DELETE': 4 }.get(method, 5) # Sort foo{arg} after foo/, but before foo/bar if path.endswith('/'): path = path[:-1] + ' ' path = path.replace('{', '!') return path, method_priority
sort endpoints first alphanumerically by path, then by method order
2,096
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def is_basic_type(obj: Any, allow_none=True) -> TypeGuard[_KnownPythonTypes]: if not isinstance(obj, collections.abc.Hashable): return False if not allow_none and (obj is None or obj is OpenApiTypes.NONE): return False return obj in get_openapi_type_mapping() or obj in PYTHON_TYPE_MAPPING def build_basic_type(obj: Union[_KnownPythonTypes, OpenApiTypes]) -> Optional[_SchemaType]: """ resolve either enum or actual type and yield schema template for modification """ openapi_type_mapping = get_openapi_type_mapping() if obj is None or type(obj) is None or obj is OpenApiTypes.NONE: return None elif obj in openapi_type_mapping: return dict(openapi_type_mapping[obj]) elif obj in PYTHON_TYPE_MAPPING: return dict(openapi_type_mapping[PYTHON_TYPE_MAPPING[obj]]) else: warn(f'could not resolve type for "{obj}". defaulting to "string"') return dict(openapi_type_mapping[OpenApiTypes.STR]) def build_parameter_type( name: str, schema: _SchemaType, location: _ParameterLocationType, required=False, description=None, enum=None, pattern=None, deprecated=False, explode=None, style=None, default=None, allow_blank=True, examples=None, extensions=None, ) -> _SchemaType: irrelevant_field_meta = ['readOnly', 'writeOnly'] if location == OpenApiParameter.PATH: irrelevant_field_meta += ['nullable', 'default'] schema = { 'in': location, 'name': name, 'schema': {k: v for k, v in schema.items() if k not in irrelevant_field_meta}, } if description: schema['description'] = description if required or location == 'path': schema['required'] = True if deprecated: schema['deprecated'] = True if explode is not None: schema['explode'] = explode if style is not None: schema['style'] = style if enum: # in case of array schema, enum makes little sense on the array itself if schema['schema'].get('type') == 'array': schema['schema']['items']['enum'] = sorted(enum, key=str) else: 
schema['schema']['enum'] = sorted(enum, key=str) if pattern is not None: # in case of array schema, pattern only makes sense on the items if schema['schema'].get('type') == 'array': schema['schema']['items']['pattern'] = pattern else: schema['schema']['pattern'] = pattern if default is not None and 'default' not in irrelevant_field_meta: schema['schema']['default'] = default if not allow_blank and schema['schema'].get('type') == 'string': schema['schema']['minLength'] = schema['schema'].get('minLength', 1) if examples: schema['examples'] = examples if extensions: schema.update(sanitize_specification_extensions(extensions)) return schema def anchor_pattern(pattern: str) -> str: if not pattern.startswith('^'): pattern = '^' + pattern if not pattern.endswith('$'): pattern = pattern + '$' return pattern def warn(msg: str, delayed: Any = None) -> None: if delayed: warnings = get_override(delayed, 'warnings', []) warnings.append(msg) set_override(delayed, 'warnings', warnings) else: GENERATOR_STATS.emit(msg, 'warning') def error(msg: str, delayed: Any = None) -> None: if delayed: errors = get_override(delayed, 'errors', []) errors.append(msg) set_override(delayed, 'errors', errors) else: GENERATOR_STATS.emit(msg, 'error') spectacular_settings = SpectacularSettings( user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}), # type: ignore defaults=SPECTACULAR_DEFAULTS, # type: ignore import_strings=IMPORT_STRINGS, ) class OpenApiTypes(enum.Enum): """ Basic types known to the OpenAPI specification or at least common format extension of it. - Use ``BYTE`` for base64-encoded data wrapped in a string - Use ``BINARY`` for raw binary data - Use ``OBJECT`` for arbitrary free-form object (usually a :py:class:`dict`) """ #: Converted to ``{"type": "number"}``. NUMBER = enum.auto() #: Converted to ``{"type": "number", "format": "float"}``. #: Equivalent to :py:class:`float`. FLOAT = enum.auto() #: Converted to ``{"type": "number", "format": "double"}``. 
DOUBLE = enum.auto() #: Converted to ``{"type": "boolean"}``. #: Equivalent to :py:class:`bool`. BOOL = enum.auto() #: Converted to ``{"type": "string"}``. #: Equivalent to :py:class:`str`. STR = enum.auto() #: Converted to ``{"type": "string", "format": "byte"}``. #: Use this for base64-encoded data wrapped in a string. BYTE = enum.auto() #: Converted to ``{"type": "string", "format": "binary"}``. #: Equivalent to :py:class:`bytes`. #: Use this for raw binary data. BINARY = enum.auto() #: Converted to ``{"type": "string", "format": "password"}``. PASSWORD = enum.auto() #: Converted to ``{"type": "integer"}``. #: Equivalent to :py:class:`int`. INT = enum.auto() #: Converted to ``{"type": "integer", "format": "int32"}``. INT32 = enum.auto() #: Converted to ``{"type": "integer", "format": "int64"}``. INT64 = enum.auto() #: Converted to ``{"type": "string", "format": "uuid"}``. #: Equivalent to :py:class:`~uuid.UUID`. UUID = enum.auto() #: Converted to ``{"type": "string", "format": "uri"}``. URI = enum.auto() #: Converted to ``{"type": "string", "format": "uri-reference"}``. URI_REF = enum.auto() #: Converted to ``{"type": "string", "format": "uri-template"}``. URI_TPL = enum.auto() #: Converted to ``{"type": "string", "format": "iri"}``. IRI = enum.auto() #: Converted to ``{"type": "string", "format": "iri-reference"}``. IRI_REF = enum.auto() #: Converted to ``{"type": "string", "format": "ipv4"}``. #: Equivalent to :py:class:`~ipaddress.IPv4Address`. IP4 = enum.auto() #: Converted to ``{"type": "string", "format": "ipv6"}``. #: Equivalent to :py:class:`~ipaddress.IPv6Address`. IP6 = enum.auto() #: Converted to ``{"type": "string", "format": "hostname"}``. HOSTNAME = enum.auto() #: Converted to ``{"type": "string", "format": "idn-hostname"}``. IDN_HOSTNAME = enum.auto() #: Converted to ``{"type": "number", "format": "double"}``. #: The same as :py:attr:`~drf_spectacular.types.OpenApiTypes.DOUBLE`. #: Equivalent to :py:class:`~decimal.Decimal`. 
DECIMAL = enum.auto() #: Converted to ``{"type": "string", "format": "date-time"}``. #: Equivalent to :py:class:`~datetime.datetime`. DATETIME = enum.auto() #: Converted to ``{"type": "string", "format": "date"}``. #: Equivalent to :py:class:`~datetime.date`. DATE = enum.auto() #: Converted to ``{"type": "string", "format": "time"}``. #: Equivalent to :py:class:`~datetime.time`. TIME = enum.auto() #: Converted to ``{"type": "string", "format": "duration"}``. #: Equivalent to :py:class:`~datetime.timedelta`. #: Expressed according to ISO 8601. DURATION = enum.auto() #: Converted to ``{"type": "string", "format": "email"}``. EMAIL = enum.auto() #: Converted to ``{"type": "string", "format": "idn-email"}``. IDN_EMAIL = enum.auto() #: Converted to ``{"type": "string", "format": "json-pointer"}``. JSON_PTR = enum.auto() #: Converted to ``{"type": "string", "format": "relative-json-pointer"}``. JSON_PTR_REL = enum.auto() #: Converted to ``{"type": "string", "format": "regex"}``. REGEX = enum.auto() #: Converted to ``{"type": "object", ...}``. #: Use this for arbitrary free-form objects (usually a :py:class:`dict`). #: The ``additionalProperties`` item is added depending on the ``GENERIC_ADDITIONAL_PROPERTIES`` setting. OBJECT = enum.auto() #: Equivalent to :py:data:`None`. #: This signals that the request or response is empty. NONE = enum.auto() #: Converted to ``{}`` which sets no type and format. #: Equivalent to :py:class:`typing.Any`. ANY = enum.auto() DJANGO_PATH_CONVERTER_MAPPING = { 'int': OpenApiTypes.INT, 'path': OpenApiTypes.STR, 'slug': OpenApiTypes.STR, 'str': OpenApiTypes.STR, 'uuid': OpenApiTypes.UUID, 'drf_format_suffix': OpenApiTypes.STR, } class OpenApiParameter(OpenApiSchemaBase): """ Helper class to document request query/path/header/cookie parameters. Can also be used to document response headers. Please note that not all arguments apply to all ``location``/``type``/direction variations, e.g. path parameters are ``required=True`` by definition. 
For valid ``style`` choices please consult the `OpenAPI specification <https://swagger.io/specification/#style-values>`_. """ QUERY: Final = 'query' PATH: Final = 'path' HEADER: Final = 'header' COOKIE: Final = 'cookie' def __init__( self, name: str, type: Union[_SerializerType, _KnownPythonTypes, OpenApiTypes, _SchemaType] = str, location: _ParameterLocationType = QUERY, required: bool = False, description: _StrOrPromise = '', enum: Optional[Sequence[Any]] = None, pattern: Optional[str] = None, deprecated: bool = False, style: Optional[str] = None, explode: Optional[bool] = None, default: Any = None, allow_blank: bool = True, many: Optional[bool] = None, examples: Optional[Sequence[OpenApiExample]] = None, extensions: Optional[Dict[str, Any]] = None, exclude: bool = False, response: Union[bool, Sequence[Union[int, str]]] = False, ): self.name = name self.type = type self.location = location self.required = required self.description = description self.enum = enum self.pattern = pattern self.deprecated = deprecated self.style = style self.explode = explode self.default = default self.allow_blank = allow_blank self.many = many self.examples = examples or [] self.extensions = extensions self.exclude = exclude self.response = response The provided code snippet includes necessary dependencies for implementing the `resolve_django_path_parameter` function. Write a Python function `def resolve_django_path_parameter(path_regex, variable, available_formats)` to solve the following problem: convert django style path parameters to OpenAPI parameters. Here is the function: def resolve_django_path_parameter(path_regex, variable, available_formats): """ convert django style path parameters to OpenAPI parameters. 
""" registered_converters = get_converters() for match in _PATH_PARAMETER_COMPONENT_RE.finditer(path_regex): converter, parameter = match.group('converter'), match.group('parameter') enum_values = None if api_settings.SCHEMA_COERCE_PATH_PK and parameter == 'pk': parameter = 'id' elif spectacular_settings.SCHEMA_COERCE_PATH_PK_SUFFIX and parameter.endswith('_pk'): parameter = f'{parameter[:-3]}_id' if parameter != variable: continue # RE also matches untyped patterns (e.g. "<id>") if not converter: return None # special handling for drf_format_suffix if converter.startswith('drf_format_suffix_'): explicit_formats = converter[len('drf_format_suffix_'):].split('_') enum_values = [ f'.{suffix}' for suffix in explicit_formats if suffix in available_formats ] converter = 'drf_format_suffix' elif converter == 'drf_format_suffix': enum_values = [f'.{suffix}' for suffix in available_formats] if converter in spectacular_settings.PATH_CONVERTER_OVERRIDES: override = spectacular_settings.PATH_CONVERTER_OVERRIDES[converter] if is_basic_type(override): schema = build_basic_type(override) elif isinstance(override, dict): schema = dict(override) else: warn( f'Unable to use path converter override for "{converter}". ' f'Please refer to the documentation on how to use this.' ) return None elif converter in DJANGO_PATH_CONVERTER_MAPPING: schema = build_basic_type(DJANGO_PATH_CONVERTER_MAPPING[converter]) elif converter in registered_converters: # gracious fallback for custom converters that have no override specified. schema = build_basic_type(OpenApiTypes.STR) schema['pattern'] = anchor_pattern(registered_converters[converter].regex) else: error(f'Encountered path converter "{converter}" that is unknown to Django.') return None return build_parameter_type( name=variable, schema=schema, location=OpenApiParameter.PATH, enum=enum_values, ) return None
convert django style path parameters to OpenAPI parameters.
2,097
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def build_basic_type(obj: Union[_KnownPythonTypes, OpenApiTypes]) -> Optional[_SchemaType]: """ resolve either enum or actual type and yield schema template for modification """ openapi_type_mapping = get_openapi_type_mapping() if obj is None or type(obj) is None or obj is OpenApiTypes.NONE: return None elif obj in openapi_type_mapping: return dict(openapi_type_mapping[obj]) elif obj in PYTHON_TYPE_MAPPING: return dict(openapi_type_mapping[PYTHON_TYPE_MAPPING[obj]]) else: warn(f'could not resolve type for "{obj}". defaulting to "string"') return dict(openapi_type_mapping[OpenApiTypes.STR]) def build_parameter_type( name: str, schema: _SchemaType, location: _ParameterLocationType, required=False, description=None, enum=None, pattern=None, deprecated=False, explode=None, style=None, default=None, allow_blank=True, examples=None, extensions=None, ) -> _SchemaType: irrelevant_field_meta = ['readOnly', 'writeOnly'] if location == OpenApiParameter.PATH: irrelevant_field_meta += ['nullable', 'default'] schema = { 'in': location, 'name': name, 'schema': {k: v for k, v in schema.items() if k not in irrelevant_field_meta}, } if description: schema['description'] = description if required or location == 'path': schema['required'] = True if deprecated: schema['deprecated'] = True if explode is not None: schema['explode'] = explode if style is not None: schema['style'] = style if enum: # in case of array schema, enum makes little sense on the array itself if schema['schema'].get('type') == 'array': schema['schema']['items']['enum'] = sorted(enum, key=str) else: schema['schema']['enum'] = sorted(enum, key=str) if pattern is not None: # in case of array schema, pattern only makes sense on the items if schema['schema'].get('type') == 'array': schema['schema']['items']['pattern'] = pattern else: schema['schema']['pattern'] = pattern if default is not None and 
'default' not in irrelevant_field_meta: schema['schema']['default'] = default if not allow_blank and schema['schema'].get('type') == 'string': schema['schema']['minLength'] = schema['schema'].get('minLength', 1) if examples: schema['examples'] = examples if extensions: schema.update(sanitize_specification_extensions(extensions)) return schema def anchor_pattern(pattern: str) -> str: if not pattern.startswith('^'): pattern = '^' + pattern if not pattern.endswith('$'): pattern = pattern + '$' return pattern def analyze_named_regex_pattern(path: str) -> Dict[str, str]: """ safely extract named groups and their pattern from given regex pattern """ result = {} stack = 0 name_capture, name_buffer = False, '' regex_capture, regex_buffer = False, '' i = 0 while i < len(path): # estimate state at position i skip = False if path[i] == '\\': ff = 2 elif path[i:i + 4] == '(?P<': skip = True name_capture = True ff = 4 elif path[i] in '(' and regex_capture: stack += 1 ff = 1 elif path[i] == '>' and name_capture: assert name_buffer name_capture = False regex_capture = True skip = True ff = 1 elif path[i] in ')' and regex_capture: if not stack: regex_capture = False result[name_buffer] = regex_buffer name_buffer, regex_buffer = '', '' else: stack -= 1 ff = 1 else: ff = 1 # fill buffer based on state if name_capture and not skip: name_buffer += path[i:i + ff] elif regex_capture and not skip: regex_buffer += path[i:i + ff] i += ff assert not stack return result spectacular_settings = SpectacularSettings( user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}), # type: ignore defaults=SPECTACULAR_DEFAULTS, # type: ignore import_strings=IMPORT_STRINGS, ) class OpenApiTypes(enum.Enum): """ Basic types known to the OpenAPI specification or at least common format extension of it. 
- Use ``BYTE`` for base64-encoded data wrapped in a string - Use ``BINARY`` for raw binary data - Use ``OBJECT`` for arbitrary free-form object (usually a :py:class:`dict`) """ #: Converted to ``{"type": "number"}``. NUMBER = enum.auto() #: Converted to ``{"type": "number", "format": "float"}``. #: Equivalent to :py:class:`float`. FLOAT = enum.auto() #: Converted to ``{"type": "number", "format": "double"}``. DOUBLE = enum.auto() #: Converted to ``{"type": "boolean"}``. #: Equivalent to :py:class:`bool`. BOOL = enum.auto() #: Converted to ``{"type": "string"}``. #: Equivalent to :py:class:`str`. STR = enum.auto() #: Converted to ``{"type": "string", "format": "byte"}``. #: Use this for base64-encoded data wrapped in a string. BYTE = enum.auto() #: Converted to ``{"type": "string", "format": "binary"}``. #: Equivalent to :py:class:`bytes`. #: Use this for raw binary data. BINARY = enum.auto() #: Converted to ``{"type": "string", "format": "password"}``. PASSWORD = enum.auto() #: Converted to ``{"type": "integer"}``. #: Equivalent to :py:class:`int`. INT = enum.auto() #: Converted to ``{"type": "integer", "format": "int32"}``. INT32 = enum.auto() #: Converted to ``{"type": "integer", "format": "int64"}``. INT64 = enum.auto() #: Converted to ``{"type": "string", "format": "uuid"}``. #: Equivalent to :py:class:`~uuid.UUID`. UUID = enum.auto() #: Converted to ``{"type": "string", "format": "uri"}``. URI = enum.auto() #: Converted to ``{"type": "string", "format": "uri-reference"}``. URI_REF = enum.auto() #: Converted to ``{"type": "string", "format": "uri-template"}``. URI_TPL = enum.auto() #: Converted to ``{"type": "string", "format": "iri"}``. IRI = enum.auto() #: Converted to ``{"type": "string", "format": "iri-reference"}``. IRI_REF = enum.auto() #: Converted to ``{"type": "string", "format": "ipv4"}``. #: Equivalent to :py:class:`~ipaddress.IPv4Address`. IP4 = enum.auto() #: Converted to ``{"type": "string", "format": "ipv6"}``. 
#: Equivalent to :py:class:`~ipaddress.IPv6Address`. IP6 = enum.auto() #: Converted to ``{"type": "string", "format": "hostname"}``. HOSTNAME = enum.auto() #: Converted to ``{"type": "string", "format": "idn-hostname"}``. IDN_HOSTNAME = enum.auto() #: Converted to ``{"type": "number", "format": "double"}``. #: The same as :py:attr:`~drf_spectacular.types.OpenApiTypes.DOUBLE`. #: Equivalent to :py:class:`~decimal.Decimal`. DECIMAL = enum.auto() #: Converted to ``{"type": "string", "format": "date-time"}``. #: Equivalent to :py:class:`~datetime.datetime`. DATETIME = enum.auto() #: Converted to ``{"type": "string", "format": "date"}``. #: Equivalent to :py:class:`~datetime.date`. DATE = enum.auto() #: Converted to ``{"type": "string", "format": "time"}``. #: Equivalent to :py:class:`~datetime.time`. TIME = enum.auto() #: Converted to ``{"type": "string", "format": "duration"}``. #: Equivalent to :py:class:`~datetime.timedelta`. #: Expressed according to ISO 8601. DURATION = enum.auto() #: Converted to ``{"type": "string", "format": "email"}``. EMAIL = enum.auto() #: Converted to ``{"type": "string", "format": "idn-email"}``. IDN_EMAIL = enum.auto() #: Converted to ``{"type": "string", "format": "json-pointer"}``. JSON_PTR = enum.auto() #: Converted to ``{"type": "string", "format": "relative-json-pointer"}``. JSON_PTR_REL = enum.auto() #: Converted to ``{"type": "string", "format": "regex"}``. REGEX = enum.auto() #: Converted to ``{"type": "object", ...}``. #: Use this for arbitrary free-form objects (usually a :py:class:`dict`). #: The ``additionalProperties`` item is added depending on the ``GENERIC_ADDITIONAL_PROPERTIES`` setting. OBJECT = enum.auto() #: Equivalent to :py:data:`None`. #: This signals that the request or response is empty. NONE = enum.auto() #: Converted to ``{}`` which sets no type and format. #: Equivalent to :py:class:`typing.Any`. 
ANY = enum.auto() class OpenApiParameter(OpenApiSchemaBase): """ Helper class to document request query/path/header/cookie parameters. Can also be used to document response headers. Please note that not all arguments apply to all ``location``/``type``/direction variations, e.g. path parameters are ``required=True`` by definition. For valid ``style`` choices please consult the `OpenAPI specification <https://swagger.io/specification/#style-values>`_. """ QUERY: Final = 'query' PATH: Final = 'path' HEADER: Final = 'header' COOKIE: Final = 'cookie' def __init__( self, name: str, type: Union[_SerializerType, _KnownPythonTypes, OpenApiTypes, _SchemaType] = str, location: _ParameterLocationType = QUERY, required: bool = False, description: _StrOrPromise = '', enum: Optional[Sequence[Any]] = None, pattern: Optional[str] = None, deprecated: bool = False, style: Optional[str] = None, explode: Optional[bool] = None, default: Any = None, allow_blank: bool = True, many: Optional[bool] = None, examples: Optional[Sequence[OpenApiExample]] = None, extensions: Optional[Dict[str, Any]] = None, exclude: bool = False, response: Union[bool, Sequence[Union[int, str]]] = False, ): self.name = name self.type = type self.location = location self.required = required self.description = description self.enum = enum self.pattern = pattern self.deprecated = deprecated self.style = style self.explode = explode self.default = default self.allow_blank = allow_blank self.many = many self.examples = examples or [] self.extensions = extensions self.exclude = exclude self.response = response The provided code snippet includes necessary dependencies for implementing the `resolve_regex_path_parameter` function. Write a Python function `def resolve_regex_path_parameter(path_regex, variable)` to solve the following problem: convert regex path parameter to OpenAPI parameter, if pattern is explicitly chosen and not the generic non-empty default '[^/.]+'. 
Here is the function: def resolve_regex_path_parameter(path_regex, variable): """ convert regex path parameter to OpenAPI parameter, if pattern is explicitly chosen and not the generic non-empty default '[^/.]+'. """ for parameter, pattern in analyze_named_regex_pattern(path_regex).items(): if api_settings.SCHEMA_COERCE_PATH_PK and parameter == 'pk': parameter = 'id' elif spectacular_settings.SCHEMA_COERCE_PATH_PK_SUFFIX and parameter.endswith('_pk'): parameter = f'{parameter[:-3]}_id' if parameter != variable: continue # do not use default catch-all pattern and defer to model resolution if pattern == '[^/.]+': return None return build_parameter_type( name=variable, schema=build_basic_type(OpenApiTypes.STR), pattern=anchor_pattern(pattern), location=OpenApiParameter.PATH, ) return None
convert regex path parameter to OpenAPI parameter, if pattern is explicitly chosen and not the generic non-empty default '[^/.]+'.
2,098
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def is_versioning_supported(versioning_class) -> bool: return issubclass(versioning_class, ( versioning.URLPathVersioning, versioning.NamespaceVersioning, versioning.AcceptHeaderVersioning ))
null
2,099
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def operation_matches_version(view, requested_version) -> bool: try: version, _ = view.determine_version(view.request, **view.kwargs) except exceptions.NotAcceptable: return False else: return str(version) == str(requested_version)
null
2,100
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) if hasattr(types, 'UnionType'): UNION_TYPES: Tuple[Any, ...] = (Union, types.UnionType) else: UNION_TYPES = (Union,) def detype_patterns(patterns): def error(msg: str, delayed: Any = None) -> None: def modify_for_versioning(patterns, method, path, view, requested_version): assert view.versioning_class and view.request assert requested_version view.request.version = requested_version if issubclass(view.versioning_class, versioning.URLPathVersioning): version_param = view.versioning_class.version_param # substitute version variable to emulate request path = uritemplate.partial(path, var_dict={version_param: requested_version}) if isinstance(path, URITemplate): path = path.uri # emulate router behaviour by injecting substituted variable into view view.kwargs[version_param] = requested_version elif issubclass(view.versioning_class, versioning.NamespaceVersioning): try: view.request.resolver_match = get_resolver( urlconf=detype_patterns(tuple(patterns)), ).resolve(path) except Resolver404: error(f"namespace versioning path resolution failed for {path}. Path will be ignored.") elif issubclass(view.versioning_class, versioning.AcceptHeaderVersioning): # Append the version into request accepted_media_type. # e.g "application/json; version=1.0" # To allow the AcceptHeaderVersioning negotiator going through. if not hasattr(view.request, 'accepted_renderer'): # Probably a mock request, content negotiation was not performed, so, we do it now. negotiated = view.perform_content_negotiation(view.request) view.request.accepted_renderer, view.request.accepted_media_type = negotiated media_type = _MediaType(view.request.accepted_media_type) view.request.accepted_media_type = ( f'{media_type.full_type}; {view.versioning_class.version_param}={requested_version}' ) return path
null
2,101
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def modify_media_types_for_versioning(view, media_types: List[str]) -> List[str]: if ( not view.versioning_class or not issubclass(view.versioning_class, versioning.AcceptHeaderVersioning) ): return media_types media_type = _MediaType(view.request.accepted_media_type) version = media_type.params.get(view.versioning_class.version_param) # type: ignore version = unicode_http_header(version) if not version or version == view.versioning_class.default_version: return media_types return [ f'{media_type}; {view.versioning_class.version_param}={version}' for media_type in media_types ]
null
2,102
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) The provided code snippet includes necessary dependencies for implementing the `normalize_result_object` function. Write a Python function `def normalize_result_object(result)` to solve the following problem: resolve non-serializable objects like lazy translation strings and OrderedDict Here is the function: def normalize_result_object(result): """ resolve non-serializable objects like lazy translation strings and OrderedDict """ if isinstance(result, dict) or isinstance(result, OrderedDict): return {k: normalize_result_object(v) for k, v in result.items()} if isinstance(result, list) or isinstance(result, tuple): return [normalize_result_object(v) for v in result] if isinstance(result, Promise): return str(result) for base_type in [bool, int, float, str]: if isinstance(result, base_type): return base_type(result) # coerce basic sub types return result
resolve non-serializable objects like lazy translation strings and OrderedDict
2,103
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def warn(msg: str, delayed: Any = None) -> None: if delayed: warnings = get_override(delayed, 'warnings', []) warnings.append(msg) set_override(delayed, 'warnings', warnings) else: GENERATOR_STATS.emit(msg, 'warning') spectacular_settings = SpectacularSettings( user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}), # type: ignore defaults=SPECTACULAR_DEFAULTS, # type: ignore import_strings=IMPORT_STRINGS, ) def sanitize_result_object(result): # warn about and resolve operationId collisions with suffixes operations = defaultdict(list) for path, methods in result['paths'].items(): for method, operation in methods.items(): operations[operation['operationId']].append((path, method)) for operation_id, paths in operations.items(): if len(paths) == 1: continue warn(f'operationId "{operation_id}" has collisions {paths}. resolving with numeral suffixes.') for idx, (path, method) in enumerate(sorted(paths)[1:], start=2): suffix = str(idx) if spectacular_settings.CAMELIZE_NAMES else f'_{idx}' result['paths'][path][method]['operationId'] += suffix return result
null
2,104
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) class OpenApiParameter(OpenApiSchemaBase): """ Helper class to document request query/path/header/cookie parameters. Can also be used to document response headers. Please note that not all arguments apply to all ``location``/``type``/direction variations, e.g. path parameters are ``required=True`` by definition. For valid ``style`` choices please consult the `OpenAPI specification <https://swagger.io/specification/#style-values>`_. """ QUERY: Final = 'query' PATH: Final = 'path' HEADER: Final = 'header' COOKIE: Final = 'cookie' def __init__( self, name: str, type: Union[_SerializerType, _KnownPythonTypes, OpenApiTypes, _SchemaType] = str, location: _ParameterLocationType = QUERY, required: bool = False, description: _StrOrPromise = '', enum: Optional[Sequence[Any]] = None, pattern: Optional[str] = None, deprecated: bool = False, style: Optional[str] = None, explode: Optional[bool] = None, default: Any = None, allow_blank: bool = True, many: Optional[bool] = None, examples: Optional[Sequence[OpenApiExample]] = None, extensions: Optional[Dict[str, Any]] = None, exclude: bool = False, response: Union[bool, Sequence[Union[int, str]]] = False, ): self.name = name self.type = type self.location = location self.required = required self.description = description self.enum = enum self.pattern = pattern self.deprecated = deprecated self.style = style self.explode = explode self.default = default self.allow_blank = allow_blank self.many = many self.examples = examples or [] self.extensions = extensions self.exclude = exclude self.response = response def camelize_operation(path, operation): for path_variable in re.findall(r'\{(\w+)\}', path): path = path.replace( f'{{{path_variable}}}', f'{{{inflection.camelize(path_variable, False)}}}' ) for parameter in operation.get('parameters', []): if parameter['in'] == OpenApiParameter.PATH: parameter['name'] = 
inflection.camelize(parameter['name'], False) operation['operationId'] = inflection.camelize(operation['operationId'], False) return path, operation
null
2,105
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) The provided code snippet includes necessary dependencies for implementing the `build_mock_request` function. Write a Python function `def build_mock_request(method, path, view, original_request, **kwargs)` to solve the following problem: build a mocked request and use original request as reference if available Here is the function: def build_mock_request(method, path, view, original_request, **kwargs): """ build a mocked request and use original request as reference if available """ request = getattr(APIRequestFactory(), method.lower())(path=path) request = view.initialize_request(request) if original_request: request.user = original_request.user request.auth = original_request.auth # ignore headers related to authorization as it has been handled above. # also ignore ACCEPT as the MIME type refers to SpectacularAPIView and the # version (if available) has already been processed by SpectacularAPIView. for name, value in original_request.META.items(): if not name.startswith('HTTP_'): continue if name in ['HTTP_ACCEPT', 'HTTP_COOKIE', 'HTTP_AUTHORIZATION']: continue request.META[name] = value return request
build a mocked request and use original request as reference if available
2,106
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) class _Sentinel: pass def is_higher_order_type_hint(hint) -> bool: return isinstance(hint, ( getattr(types, 'GenericAlias', _Sentinel), getattr(types, 'UnionType', _Sentinel), getattr(typing, '_GenericAlias', _Sentinel), getattr(typing, '_UnionGenericAlias', _Sentinel), ))
null
2,107
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def whitelisted(obj: object, classes: Optional[List[Type[object]]], exact=False) -> bool: if classes is None: return True if exact: return obj.__class__ in classes else: return isinstance(obj, tuple(classes))
null
2,108
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def warn(msg: str, delayed: Any = None) -> None: if delayed: warnings = get_override(delayed, 'warnings', []) warnings.append(msg) set_override(delayed, 'warnings', warnings) else: GENERATOR_STATS.emit(msg, 'warning') def build_listed_example_value(value: Any, paginator, direction): if not paginator or direction == 'request': return [value] sentinel = object() schema = paginator.get_paginated_response_schema(sentinel) if schema is sentinel: return [value] try: return { field_name: [value] if field_schema is sentinel else field_schema['example'] for field_name, field_schema in schema['properties'].items() } except (AttributeError, KeyError): warn( f"OpenApiExample could not be paginated because {paginator.__class__} either " f"has an unknown schema structure or the individual pagination fields did not " f"provide example values themselves. Using the plain example value as fallback." ) return value
null
2,109
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def filter_supported_arguments(func, **kwargs): sig = inspect.signature(func) return { arg: val for arg, val in kwargs.items() if arg in sig.parameters }
null
2,110
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) def build_serializer_context(view) -> typing.Dict[str, Any]: try: return view.get_serializer_context() except: # noqa return {'request': view.request}
null
2,111
import collections import functools import hashlib import inspect import json import re import sys import types import typing import urllib.parse from abc import ABCMeta from collections import OrderedDict, defaultdict from decimal import Decimal from enum import Enum from typing import ( Any, DefaultDict, Dict, Generic, List, Optional, Sequence, Tuple, Type, TypeVar, Union, ) import inflection import uritemplate from django.apps import apps from django.db.models.constants import LOOKUP_SEP from django.db.models.fields.related_descriptors import ( ForwardManyToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from django.db.models.fields.reverse_related import ForeignObjectRel from django.db.models.sql.query import Query from django.urls.converters import get_converters from django.urls.resolvers import ( # type: ignore[attr-defined] _PATH_PARAMETER_COMPONENT_RE, RegexPattern, Resolver404, RoutePattern, URLPattern, URLResolver, get_resolver, ) from django.utils.functional import Promise, cached_property from django.utils.module_loading import import_string from django.utils.translation import gettext_lazy as _ from rest_framework import exceptions, fields, mixins, serializers, versioning from rest_framework.compat import unicode_http_header from rest_framework.fields import empty from rest_framework.settings import api_settings from rest_framework.test import APIRequestFactory from rest_framework.utils.encoders import JSONEncoder from rest_framework.utils.mediatypes import _MediaType from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList from uritemplate import URITemplate from drf_spectacular.drainage import cache, error, get_override, warn from drf_spectacular.settings import spectacular_settings from drf_spectacular.types import ( DJANGO_PATH_CONVERTER_MAPPING, OPENAPI_TYPE_MAPPING, PYTHON_TYPE_MAPPING, OpenApiTypes, _KnownPythonTypes, ) from drf_spectacular.utils import ( OpenApiExample, 
OpenApiParameter, OpenApiWebhook, _FieldType, _ListSerializerType, _ParameterLocationType, _SchemaType, _SerializerType, ) class ComponentRegistry: def __init__(self) -> None: self._components: Dict[Tuple[str, str], ResolvedComponent] = {} def register(self, component: ResolvedComponent) -> None: if component in self: warn( f'trying to re-register a {component.type} component with name ' f'{self._components[component.key].name}. this might lead to ' f'a incorrect schema. Look out for reused names' ) self._components[component.key] = component def register_on_missing(self, component: ResolvedComponent) -> None: if component not in self: self._components[component.key] = component def __contains__(self, component): if component.key not in self._components: return False query_obj = component.object registry_obj = self._components[component.key].object query_class = query_obj if inspect.isclass(query_obj) else query_obj.__class__ registry_class = query_obj if inspect.isclass(registry_obj) else registry_obj.__class__ suppress_collision_warning = ( get_override(registry_class, 'suppress_collision_warning', False) or get_override(query_class, 'suppress_collision_warning', False) ) if query_class != registry_class and not suppress_collision_warning: warn( f'Encountered 2 components with identical names "{component.name}" and ' f'different classes {query_class} and {registry_class}. This will very ' f'likely result in an incorrect schema. Try renaming one.' 
) return True def __getitem__(self, key) -> ResolvedComponent: if isinstance(key, ResolvedComponent): key = key.key return self._components[key] def __delitem__(self, key): if isinstance(key, ResolvedComponent): key = key.key del self._components[key] def build(self, extra_components) -> _SchemaType: output: DefaultDict[str, _SchemaType] = defaultdict(dict) # build tree from flat registry for component in self._components.values(): output[component.type][component.name] = component.schema # add/override extra components for extra_type, extra_component_dict in extra_components.items(): for component_name, component_schema in extra_component_dict.items(): output[extra_type][component_name] = component_schema # sort by component type then by name return { type: {name: output[type][name] for name in sorted(output[type].keys())} for type in sorted(output.keys()) } def sanitize_specification_extensions(extensions): # https://spec.openapis.org/oas/v3.0.3#specificationExtensions output = {} for key, value in extensions.items(): if not re.match(r'^x-', key): warn(f'invalid extension {key!r}. vendor extensions must start with "x-"') else: output[key] = value return output def build_mocked_view(method: str, path: str, extend_schema_decorator, registry): from rest_framework import parsers, views class TmpView(views.APIView): parser_classes = [parsers.JSONParser] # emulate what Generator would do to setup schema generation. view_callable = TmpView.as_view() view: views.APIView = view_callable.cls() view.request = spectacular_settings.GET_MOCK_REQUEST( method.upper(), path, view, None ) view.kwargs = {} # prepare AutoSchema with "init" values as if get_operation() was called schema: Any = view.schema schema.registry = registry schema.path = path schema.path_regex = path schema.path_prefix = '' schema.method = method.upper() return view class OpenApiWebhook(OpenApiSchemaBase): """ Helper class to document webhook definitions. 
A webhook specifies a possible out-of-band request initiated by the API provider and the expected responses from the consumer. Please note that this particular :func:`@extend_schema <.extend_schema>` instance operates from the perspective of the webhook origin, which means that ``request`` specifies the outgoing request. For convenience sake, we assume the API provider sends a POST request with a body of type ``application/json`` and the receiver responds with ``200`` if the event was successfully received. :param name: Name under which this webhook is listed in the schema. :param decorator: :func:`@extend_schema <.extend_schema>` decorator that specifies the receiving endpoint. In this special context the allowed parameters are ``requests``, ``responses``, ``summary``, ``description``, ``deprecated``. """ def __init__( self, name: _StrOrPromise, decorator: Union[Callable[[F], F], Dict[str, Callable[[F], F]], Dict[str, Any]], ): self.name = name self.decorator = decorator The provided code snippet includes necessary dependencies for implementing the `process_webhooks` function. Write a Python function `def process_webhooks(webhooks: List[OpenApiWebhook], registry: ComponentRegistry)` to solve the following problem: Creates a mocked view for every webhook. The given extend_schema decorator then specifies the expectations on the receiving end of the callback. Effectively simulates a sub-schema from the opposing perspective via a virtual view definition. Here is the function: def process_webhooks(webhooks: List[OpenApiWebhook], registry: ComponentRegistry): """ Creates a mocked view for every webhook. The given extend_schema decorator then specifies the expectations on the receiving end of the callback. Effectively simulates a sub-schema from the opposing perspective via a virtual view definition. 
""" result = {} for webhook in webhooks: if isinstance(webhook.decorator, dict): methods = webhook.decorator else: methods = {'post': webhook.decorator} path_items = {} for method, decorator in methods.items(): # a dict indicates a raw schema; use directly if isinstance(decorator, dict): path_items[method.lower()] = decorator continue mocked_view = build_mocked_view( method=method, path="/", extend_schema_decorator=decorator, registry=registry, ) operation = {} description = mocked_view.schema.get_description() if description: operation['description'] = description summary = mocked_view.schema.get_summary() if summary: operation['summary'] = summary tags = mocked_view.schema.get_tags() if tags: operation['tags'] = tags request_body = mocked_view.schema._get_request_body('response') if request_body: operation['requestBody'] = request_body deprecated = mocked_view.schema.is_deprecated() if deprecated: operation['deprecated'] = deprecated operation['responses'] = mocked_view.schema._get_response_bodies('request') extensions = mocked_view.schema.get_extensions() if extensions: operation.update(sanitize_specification_extensions(extensions)) path_items[method.lower()] = operation result[webhook.name] = path_items return result
Creates a mocked view for every webhook. The given extend_schema decorator then specifies the expectations on the receiving end of the callback. Effectively simulates a sub-schema from the opposing perspective via a virtual view definition.
2,112
import inspect import sys from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union from django.utils.functional import Promise from rest_framework.fields import Field, empty from rest_framework.serializers import ListSerializer, Serializer from rest_framework.settings import api_settings from drf_spectacular.drainage import ( error, get_view_method_names, isolate_view_method, set_override, warn, ) from drf_spectacular.types import OpenApiTypes, _KnownPythonTypes _SerializerType = Union[Serializer, Type[Serializer]] _StrOrPromise = Union[str, Promise] _SchemaType = Dict[str, Any] class OpenApiExample(OpenApiSchemaBase): """ Helper class to document a API parameter / request body / response body with a concrete example value. It is recommended to provide a singular example value, since pagination and list responses are handled by drf-spectacular. The example will be attached to the operation object where appropriate, i.e. where the given ``media_type``, ``status_code`` and modifiers match. Example that do not match any scenario are ignored. 
- media_type will default to 'application/json' unless implicitly specified through :class:`.OpenApiResponse` - status_codes will default to [200, 201] unless implicitly specified through :class:`.OpenApiResponse` """ def __init__( self, name: str, value: Any = empty, external_value: str = '', summary: _StrOrPromise = '', description: _StrOrPromise = '', request_only: bool = False, response_only: bool = False, parameter_only: Optional[Tuple[str, _ParameterLocationType]] = None, media_type: Optional[str] = None, status_codes: Optional[Sequence[Union[str, int]]] = None, ): self.name = name self.summary = summary self.description = description self.value = value self.external_value = external_value self.request_only = request_only self.response_only = response_only self.parameter_only = parameter_only self.media_type = media_type self.status_codes = status_codes class OpenApiParameter(OpenApiSchemaBase): """ Helper class to document request query/path/header/cookie parameters. Can also be used to document response headers. Please note that not all arguments apply to all ``location``/``type``/direction variations, e.g. path parameters are ``required=True`` by definition. For valid ``style`` choices please consult the `OpenAPI specification <https://swagger.io/specification/#style-values>`_. 
""" QUERY: Final = 'query' PATH: Final = 'path' HEADER: Final = 'header' COOKIE: Final = 'cookie' def __init__( self, name: str, type: Union[_SerializerType, _KnownPythonTypes, OpenApiTypes, _SchemaType] = str, location: _ParameterLocationType = QUERY, required: bool = False, description: _StrOrPromise = '', enum: Optional[Sequence[Any]] = None, pattern: Optional[str] = None, deprecated: bool = False, style: Optional[str] = None, explode: Optional[bool] = None, default: Any = None, allow_blank: bool = True, many: Optional[bool] = None, examples: Optional[Sequence[OpenApiExample]] = None, extensions: Optional[Dict[str, Any]] = None, exclude: bool = False, response: Union[bool, Sequence[Union[int, str]]] = False, ): self.name = name self.type = type self.location = location self.required = required self.description = description self.enum = enum self.pattern = pattern self.deprecated = deprecated self.style = style self.explode = explode self.default = default self.allow_blank = allow_blank self.many = many self.examples = examples or [] self.extensions = extensions self.exclude = exclude self.response = response F = TypeVar('F', bound=Callable[..., Any]) class OpenApiCallback(OpenApiSchemaBase): """ Helper class to bundle a callback definition. This specifies a view on the callee's side, effectively stating the expectations on the receiving end. Please note that this particular :func:`@extend_schema <.extend_schema>` instance operates from the perspective of the callback origin, which means that ``request`` specifies the outgoing request. For convenience sake, we assume the callback sends ``application/json`` and return a ``200``. If that is not sufficient, you can use ``request`` and ``responses`` overloads just as you normally would. :param name: Name under which the this callback is listed in the schema. :param path: Path on which the callback operation is performed. 
To reference request body contents, please refer to OpenAPI specification's `key expressions <https://swagger.io/specification/#key-expression>`_ for valid choices. :param decorator: :func:`@extend_schema <.extend_schema>` decorator that specifies the receiving endpoint. In this special context the allowed parameters are ``requests``, ``responses``, ``summary``, ``description``, ``deprecated``. """ def __init__( self, name: _StrOrPromise, path: str, decorator: Union[Callable[[F], F], Dict[str, Callable[[F], F]], Dict[str, Any]], ): self.name = name self.path = path self.decorator = decorator def error(msg: str, delayed: Any = None) -> None: if delayed: errors = get_override(delayed, 'errors', []) errors.append(msg) set_override(delayed, 'errors', errors) else: GENERATOR_STATS.emit(msg, 'error') def get_view_method_names(view, schema=None) -> List[str]: schema = schema or view.schema return [ item for item in dir(view) if callable(getattr(view, item)) and ( item in view.http_method_names or item in schema.method_mapping.values() or item == 'list' or hasattr(getattr(view, item), 'mapping') ) ] def isolate_view_method(view, method_name): """ Prevent modifying a view method which is derived from other views. Changes to a derived method would leak into the view where the method originated from. Break derivation by wrapping the method and explicitly setting it on the view. """ method = getattr(view, method_name) # no isolation is required if the view method is not derived. # @api_view is a special case that also breaks isolation. It proxies all view # methods through a single handler function, which then also requires isolation. if method_name in view.__dict__ and method.__name__ != 'handler': return method def wrapped_method(self, request, *args, **kwargs): return method(self, request, *args, **kwargs) # wraps() will only create a shallow copy of method.__dict__. Updates to "kwargs" # via @extend_schema would leak to the original method. Isolate by creating a copy. 
if hasattr(method, 'kwargs'): wrapped_method.kwargs = method.kwargs.copy() setattr(view, method_name, wrapped_method) return wrapped_method The provided code snippet includes necessary dependencies for implementing the `extend_schema` function. Write a Python function `def extend_schema( operation_id: Optional[str] = None, parameters: Optional[Sequence[Union[OpenApiParameter, _SerializerType]]] = None, request: Any = empty, responses: Any = empty, auth: Optional[Sequence[str]] = None, description: Optional[_StrOrPromise] = None, summary: Optional[_StrOrPromise] = None, deprecated: Optional[bool] = None, tags: Optional[Sequence[str]] = None, filters: Optional[bool] = None, exclude: Optional[bool] = None, operation: Optional[_SchemaType] = None, methods: Optional[Sequence[str]] = None, versions: Optional[Sequence[str]] = None, examples: Optional[Sequence[OpenApiExample]] = None, extensions: Optional[Dict[str, Any]] = None, callbacks: Optional[Sequence[OpenApiCallback]] = None, external_docs: Optional[Union[Dict[str, str], str]] = None, ) -> Callable[[F], F]` to solve the following problem: Decorator mainly for the "view" method kind. Partially or completely overrides what would be otherwise generated by drf-spectacular. :param operation_id: replaces the auto-generated operation_id. make sure there are no naming collisions. :param parameters: list of additional or replacement parameters added to the auto-discovered fields. :param responses: replaces the discovered Serializer. Takes a variety of inputs that can be used individually or combined - ``Serializer`` class - ``Serializer`` instance (e.g. ``Serializer(many=True)`` for listings) - basic types or instances of ``OpenApiTypes`` - :class:`.OpenApiResponse` for bundling any of the other choices together with either a dedicated response description and/or examples. - :class:`.PolymorphicProxySerializer` for signaling that the operation may yield data from different serializers depending on the circumstances. 
- ``dict`` with status codes as keys and one of the above as values. Additionally in this case, it is also possible to provide a raw schema dict as value. - ``dict`` with tuples (status_code, media_type) as keys and one of the above as values. Additionally in this case, it is also possible to provide a raw schema dict as value. :param request: replaces the discovered ``Serializer``. Takes a variety of inputs - ``Serializer`` class/instance - basic types or instances of ``OpenApiTypes`` - :class:`.PolymorphicProxySerializer` for signaling that the operation accepts a set of different types of objects. - ``dict`` with media_type as keys and one of the above as values. Additionally, in this case, it is also possible to provide a raw schema dict as value. :param auth: replace discovered auth with explicit list of auth methods :param description: replaces discovered doc strings :param summary: an optional short summary of the description :param deprecated: mark operation as deprecated :param tags: override default list of tags :param filters: ignore list detection and forcefully enable/disable filter discovery :param exclude: set True to exclude operation from schema :param operation: manually override what auto-discovery would generate. you must provide a OpenAPI3-compliant dictionary that gets directly translated to YAML. :param methods: scope extend_schema to specific methods. matches all by default. :param versions: scope extend_schema to specific API version. matches all by default. :param examples: attach request/response examples to the operation :param extensions: specification extensions, e.g. ``x-badges``, ``x-code-samples``, etc. :param callbacks: associate callbacks with this endpoint :param external_docs: Link external documentation. Provide a dict with an "url" key and optionally a "description" key. For convenience, if only a string is given it is treated as the URL. 
:return: Here is the function: def extend_schema( operation_id: Optional[str] = None, parameters: Optional[Sequence[Union[OpenApiParameter, _SerializerType]]] = None, request: Any = empty, responses: Any = empty, auth: Optional[Sequence[str]] = None, description: Optional[_StrOrPromise] = None, summary: Optional[_StrOrPromise] = None, deprecated: Optional[bool] = None, tags: Optional[Sequence[str]] = None, filters: Optional[bool] = None, exclude: Optional[bool] = None, operation: Optional[_SchemaType] = None, methods: Optional[Sequence[str]] = None, versions: Optional[Sequence[str]] = None, examples: Optional[Sequence[OpenApiExample]] = None, extensions: Optional[Dict[str, Any]] = None, callbacks: Optional[Sequence[OpenApiCallback]] = None, external_docs: Optional[Union[Dict[str, str], str]] = None, ) -> Callable[[F], F]: """ Decorator mainly for the "view" method kind. Partially or completely overrides what would be otherwise generated by drf-spectacular. :param operation_id: replaces the auto-generated operation_id. make sure there are no naming collisions. :param parameters: list of additional or replacement parameters added to the auto-discovered fields. :param responses: replaces the discovered Serializer. Takes a variety of inputs that can be used individually or combined - ``Serializer`` class - ``Serializer`` instance (e.g. ``Serializer(many=True)`` for listings) - basic types or instances of ``OpenApiTypes`` - :class:`.OpenApiResponse` for bundling any of the other choices together with either a dedicated response description and/or examples. - :class:`.PolymorphicProxySerializer` for signaling that the operation may yield data from different serializers depending on the circumstances. - ``dict`` with status codes as keys and one of the above as values. Additionally in this case, it is also possible to provide a raw schema dict as value. - ``dict`` with tuples (status_code, media_type) as keys and one of the above as values. 
Additionally in this case, it is also possible to provide a raw schema dict as value. :param request: replaces the discovered ``Serializer``. Takes a variety of inputs - ``Serializer`` class/instance - basic types or instances of ``OpenApiTypes`` - :class:`.PolymorphicProxySerializer` for signaling that the operation accepts a set of different types of objects. - ``dict`` with media_type as keys and one of the above as values. Additionally, in this case, it is also possible to provide a raw schema dict as value. :param auth: replace discovered auth with explicit list of auth methods :param description: replaces discovered doc strings :param summary: an optional short summary of the description :param deprecated: mark operation as deprecated :param tags: override default list of tags :param filters: ignore list detection and forcefully enable/disable filter discovery :param exclude: set True to exclude operation from schema :param operation: manually override what auto-discovery would generate. you must provide a OpenAPI3-compliant dictionary that gets directly translated to YAML. :param methods: scope extend_schema to specific methods. matches all by default. :param versions: scope extend_schema to specific API version. matches all by default. :param examples: attach request/response examples to the operation :param extensions: specification extensions, e.g. ``x-badges``, ``x-code-samples``, etc. :param callbacks: associate callbacks with this endpoint :param external_docs: Link external documentation. Provide a dict with an "url" key and optionally a "description" key. For convenience, if only a string is given it is treated as the URL. 
:return: """ if methods is not None: methods = [method.upper() for method in methods] def decorator(f): BaseSchema = ( # explicit manually set schema or previous view annotation getattr(f, 'schema', None) # previously set schema with @extend_schema on views methods or getattr(f, 'kwargs', {}).get('schema', None) # previously set schema with @extend_schema on @api_view or getattr(getattr(f, 'cls', None), 'kwargs', {}).get('schema', None) # the default or api_settings.DEFAULT_SCHEMA_CLASS ) if not inspect.isclass(BaseSchema): BaseSchema = BaseSchema.__class__ def is_in_scope(ext_schema): version, _ = ext_schema.view.determine_version( ext_schema.view.request, **ext_schema.view.kwargs ) version_scope = versions is None or version in versions method_scope = methods is None or ext_schema.method in methods return method_scope and version_scope class ExtendedSchema(BaseSchema): def get_operation(self, path, path_regex, path_prefix, method, registry): self.method = method.upper() if operation is not None and is_in_scope(self): return operation return super().get_operation(path, path_regex, path_prefix, method, registry) def is_excluded(self): if exclude is not None and is_in_scope(self): return exclude return super().is_excluded() def get_operation_id(self): if operation_id and is_in_scope(self): return operation_id return super().get_operation_id() def get_override_parameters(self): if parameters and is_in_scope(self): return super().get_override_parameters() + parameters return super().get_override_parameters() def get_auth(self): if auth is not None and is_in_scope(self): return auth return super().get_auth() def get_examples(self): if examples and is_in_scope(self): return super().get_examples() + examples return super().get_examples() def get_request_serializer(self): if request is not empty and is_in_scope(self): return request return super().get_request_serializer() def get_response_serializers(self): if responses is not empty and is_in_scope(self): return responses 
return super().get_response_serializers() def get_description(self): if description and is_in_scope(self): return description return super().get_description() def get_summary(self): if summary and is_in_scope(self): return str(summary) return super().get_summary() def is_deprecated(self): if deprecated and is_in_scope(self): return deprecated return super().is_deprecated() def get_tags(self): if tags is not None and is_in_scope(self): return tags return super().get_tags() def get_extensions(self): if extensions and is_in_scope(self): return extensions return super().get_extensions() def get_filter_backends(self): if filters is not None and is_in_scope(self): return getattr(self.view, 'filter_backends', []) if filters else [] return super().get_filter_backends() def get_callbacks(self): if callbacks is not None and is_in_scope(self): return callbacks return super().get_callbacks() def get_external_docs(self): if external_docs is not None and is_in_scope(self): return external_docs return super().get_external_docs() if inspect.isclass(f): # either direct decoration of views, or unpacked @api_view from OpenApiViewExtension if operation_id is not None or operation is not None: error( f'using @extend_schema on viewset class {f.__name__} with parameters ' f'operation_id or operation will most likely result in a broken schema.', delayed=f, ) # reorder schema class MRO so that view method annotation takes precedence # over view class annotation. only relevant if there is a method annotation for view_method_name in get_view_method_names(view=f, schema=BaseSchema): if 'schema' not in getattr(getattr(f, view_method_name), 'kwargs', {}): continue view_method = isolate_view_method(f, view_method_name) view_method.kwargs['schema'] = type( 'ExtendedMetaSchema', (view_method.kwargs['schema'], ExtendedSchema), {} ) # persist schema on class to provide annotation to derived view methods. 
# the second purpose is to serve as base for view multi-annotation f.schema = ExtendedSchema() return f elif callable(f) and hasattr(f, 'cls'): # 'cls' attr signals that as_view() was called, which only applies to @api_view. # keep a "unused" schema reference at root level for multi annotation convenience. setattr(f.cls, 'kwargs', {'schema': ExtendedSchema}) # set schema on method kwargs context to emulate regular view behaviour. for method in f.cls.http_method_names: setattr(getattr(f.cls, method), 'kwargs', {'schema': ExtendedSchema}) return f elif callable(f): # custom actions have kwargs in their context, others don't. create it so our create_view # implementation can overwrite the default schema if not hasattr(f, 'kwargs'): f.kwargs = {} # this simulates what @action is actually doing. somewhere along the line in this process # the schema is picked up from kwargs and used. it's involved my dear friends. # use class instead of instance due to descriptor weakref reverse collisions f.kwargs['schema'] = ExtendedSchema return f else: return f return decorator
Decorator mainly for the "view" method kind. Partially or completely overrides what would be otherwise generated by drf-spectacular. :param operation_id: replaces the auto-generated operation_id. make sure there are no naming collisions. :param parameters: list of additional or replacement parameters added to the auto-discovered fields. :param responses: replaces the discovered Serializer. Takes a variety of inputs that can be used individually or combined - ``Serializer`` class - ``Serializer`` instance (e.g. ``Serializer(many=True)`` for listings) - basic types or instances of ``OpenApiTypes`` - :class:`.OpenApiResponse` for bundling any of the other choices together with either a dedicated response description and/or examples. - :class:`.PolymorphicProxySerializer` for signaling that the operation may yield data from different serializers depending on the circumstances. - ``dict`` with status codes as keys and one of the above as values. Additionally in this case, it is also possible to provide a raw schema dict as value. - ``dict`` with tuples (status_code, media_type) as keys and one of the above as values. Additionally in this case, it is also possible to provide a raw schema dict as value. :param request: replaces the discovered ``Serializer``. Takes a variety of inputs - ``Serializer`` class/instance - basic types or instances of ``OpenApiTypes`` - :class:`.PolymorphicProxySerializer` for signaling that the operation accepts a set of different types of objects. - ``dict`` with media_type as keys and one of the above as values. Additionally, in this case, it is also possible to provide a raw schema dict as value. 
:param auth: replace discovered auth with explicit list of auth methods :param description: replaces discovered doc strings :param summary: an optional short summary of the description :param deprecated: mark operation as deprecated :param tags: override default list of tags :param filters: ignore list detection and forcefully enable/disable filter discovery :param exclude: set True to exclude operation from schema :param operation: manually override what auto-discovery would generate. you must provide a OpenAPI3-compliant dictionary that gets directly translated to YAML. :param methods: scope extend_schema to specific methods. matches all by default. :param versions: scope extend_schema to specific API version. matches all by default. :param examples: attach request/response examples to the operation :param extensions: specification extensions, e.g. ``x-badges``, ``x-code-samples``, etc. :param callbacks: associate callbacks with this endpoint :param external_docs: Link external documentation. Provide a dict with an "url" key and optionally a "description" key. For convenience, if only a string is given it is treated as the URL. :return:
2,113
import inspect import sys from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union from django.utils.functional import Promise from rest_framework.fields import Field, empty from rest_framework.serializers import ListSerializer, Serializer from rest_framework.settings import api_settings from drf_spectacular.drainage import ( error, get_view_method_names, isolate_view_method, set_override, warn, ) from drf_spectacular.types import OpenApiTypes, _KnownPythonTypes _SerializerType = Union[Serializer, Type[Serializer]] _FieldType = Union[Field, Type[Field]] _SchemaType = Dict[str, Any] F = TypeVar('F', bound=Callable[..., Any]) def set_override(obj: Any, prop: str, value: Any) -> Any: if not hasattr(obj, '_spectacular_annotation'): obj._spectacular_annotation = {} elif '_spectacular_annotation' not in obj.__dict__: obj._spectacular_annotation = obj._spectacular_annotation.copy() obj._spectacular_annotation[prop] = value return obj class OpenApiTypes(enum.Enum): """ Basic types known to the OpenAPI specification or at least common format extension of it. - Use ``BYTE`` for base64-encoded data wrapped in a string - Use ``BINARY`` for raw binary data - Use ``OBJECT`` for arbitrary free-form object (usually a :py:class:`dict`) """ #: Converted to ``{"type": "number"}``. NUMBER = enum.auto() #: Converted to ``{"type": "number", "format": "float"}``. #: Equivalent to :py:class:`float`. FLOAT = enum.auto() #: Converted to ``{"type": "number", "format": "double"}``. DOUBLE = enum.auto() #: Converted to ``{"type": "boolean"}``. #: Equivalent to :py:class:`bool`. BOOL = enum.auto() #: Converted to ``{"type": "string"}``. #: Equivalent to :py:class:`str`. STR = enum.auto() #: Converted to ``{"type": "string", "format": "byte"}``. #: Use this for base64-encoded data wrapped in a string. BYTE = enum.auto() #: Converted to ``{"type": "string", "format": "binary"}``. #: Equivalent to :py:class:`bytes`. #: Use this for raw binary data. 
BINARY = enum.auto() #: Converted to ``{"type": "string", "format": "password"}``. PASSWORD = enum.auto() #: Converted to ``{"type": "integer"}``. #: Equivalent to :py:class:`int`. INT = enum.auto() #: Converted to ``{"type": "integer", "format": "int32"}``. INT32 = enum.auto() #: Converted to ``{"type": "integer", "format": "int64"}``. INT64 = enum.auto() #: Converted to ``{"type": "string", "format": "uuid"}``. #: Equivalent to :py:class:`~uuid.UUID`. UUID = enum.auto() #: Converted to ``{"type": "string", "format": "uri"}``. URI = enum.auto() #: Converted to ``{"type": "string", "format": "uri-reference"}``. URI_REF = enum.auto() #: Converted to ``{"type": "string", "format": "uri-template"}``. URI_TPL = enum.auto() #: Converted to ``{"type": "string", "format": "iri"}``. IRI = enum.auto() #: Converted to ``{"type": "string", "format": "iri-reference"}``. IRI_REF = enum.auto() #: Converted to ``{"type": "string", "format": "ipv4"}``. #: Equivalent to :py:class:`~ipaddress.IPv4Address`. IP4 = enum.auto() #: Converted to ``{"type": "string", "format": "ipv6"}``. #: Equivalent to :py:class:`~ipaddress.IPv6Address`. IP6 = enum.auto() #: Converted to ``{"type": "string", "format": "hostname"}``. HOSTNAME = enum.auto() #: Converted to ``{"type": "string", "format": "idn-hostname"}``. IDN_HOSTNAME = enum.auto() #: Converted to ``{"type": "number", "format": "double"}``. #: The same as :py:attr:`~drf_spectacular.types.OpenApiTypes.DOUBLE`. #: Equivalent to :py:class:`~decimal.Decimal`. DECIMAL = enum.auto() #: Converted to ``{"type": "string", "format": "date-time"}``. #: Equivalent to :py:class:`~datetime.datetime`. DATETIME = enum.auto() #: Converted to ``{"type": "string", "format": "date"}``. #: Equivalent to :py:class:`~datetime.date`. DATE = enum.auto() #: Converted to ``{"type": "string", "format": "time"}``. #: Equivalent to :py:class:`~datetime.time`. TIME = enum.auto() #: Converted to ``{"type": "string", "format": "duration"}``. 
#: Equivalent to :py:class:`~datetime.timedelta`. #: Expressed according to ISO 8601. DURATION = enum.auto() #: Converted to ``{"type": "string", "format": "email"}``. EMAIL = enum.auto() #: Converted to ``{"type": "string", "format": "idn-email"}``. IDN_EMAIL = enum.auto() #: Converted to ``{"type": "string", "format": "json-pointer"}``. JSON_PTR = enum.auto() #: Converted to ``{"type": "string", "format": "relative-json-pointer"}``. JSON_PTR_REL = enum.auto() #: Converted to ``{"type": "string", "format": "regex"}``. REGEX = enum.auto() #: Converted to ``{"type": "object", ...}``. #: Use this for arbitrary free-form objects (usually a :py:class:`dict`). #: The ``additionalProperties`` item is added depending on the ``GENERIC_ADDITIONAL_PROPERTIES`` setting. OBJECT = enum.auto() #: Equivalent to :py:data:`None`. #: This signals that the request or response is empty. NONE = enum.auto() #: Converted to ``{}`` which sets no type and format. #: Equivalent to :py:class:`typing.Any`. ANY = enum.auto() The provided code snippet includes necessary dependencies for implementing the `extend_schema_field` function. Write a Python function `def extend_schema_field( field: Union[_SerializerType, _FieldType, OpenApiTypes, _SchemaType], component_name: Optional[str] = None ) -> Callable[[F], F]` to solve the following problem: Decorator for the "field" kind. Can be used with ``SerializerMethodField`` (annotate the actual method) or with custom ``serializers.Field`` implementations. If your custom serializer field base class is already the desired type, decoration is not necessary. To override the discovered base class type, you can decorate your custom field class. Always takes precedence over other mechanisms (e.g. type hints, auto-discovery). 
:param field: accepts a ``Serializer``, :class:`~.types.OpenApiTypes` or raw ``dict`` :param component_name: signals that the field should be broken out as separate component Here is the function: def extend_schema_field( field: Union[_SerializerType, _FieldType, OpenApiTypes, _SchemaType], component_name: Optional[str] = None ) -> Callable[[F], F]: """ Decorator for the "field" kind. Can be used with ``SerializerMethodField`` (annotate the actual method) or with custom ``serializers.Field`` implementations. If your custom serializer field base class is already the desired type, decoration is not necessary. To override the discovered base class type, you can decorate your custom field class. Always takes precedence over other mechanisms (e.g. type hints, auto-discovery). :param field: accepts a ``Serializer``, :class:`~.types.OpenApiTypes` or raw ``dict`` :param component_name: signals that the field should be broken out as separate component """ def decorator(f): set_override(f, 'field', field) set_override(f, 'field_component_name', component_name) return f return decorator
Decorator for the "field" kind. Can be used with ``SerializerMethodField`` (annotate the actual method) or with custom ``serializers.Field`` implementations. If your custom serializer field base class is already the desired type, decoration is not necessary. To override the discovered base class type, you can decorate your custom field class. Always takes precedence over other mechanisms (e.g. type hints, auto-discovery). :param field: accepts a ``Serializer``, :class:`~.types.OpenApiTypes` or raw ``dict`` :param component_name: signals that the field should be broken out as separate component
2,114
import inspect import sys from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union from django.utils.functional import Promise from rest_framework.fields import Field, empty from rest_framework.serializers import ListSerializer, Serializer from rest_framework.settings import api_settings from drf_spectacular.drainage import ( error, get_view_method_names, isolate_view_method, set_override, warn, ) from drf_spectacular.types import OpenApiTypes, _KnownPythonTypes F = TypeVar('F', bound=Callable[..., Any]) def warn(msg: str, delayed: Any = None) -> None: if delayed: warnings = get_override(delayed, 'warnings', []) warnings.append(msg) set_override(delayed, 'warnings', warnings) else: GENERATOR_STATS.emit(msg, 'warning') def get_view_method_names(view, schema=None) -> List[str]: schema = schema or view.schema return [ item for item in dir(view) if callable(getattr(view, item)) and ( item in view.http_method_names or item in schema.method_mapping.values() or item == 'list' or hasattr(getattr(view, item), 'mapping') ) ] def isolate_view_method(view, method_name): """ Prevent modifying a view method which is derived from other views. Changes to a derived method would leak into the view where the method originated from. Break derivation by wrapping the method and explicitly setting it on the view. """ method = getattr(view, method_name) # no isolation is required if the view method is not derived. # @api_view is a special case that also breaks isolation. It proxies all view # methods through a single handler function, which then also requires isolation. if method_name in view.__dict__ and method.__name__ != 'handler': return method def wrapped_method(self, request, *args, **kwargs): return method(self, request, *args, **kwargs) # wraps() will only create a shallow copy of method.__dict__. Updates to "kwargs" # via @extend_schema would leak to the original method. Isolate by creating a copy. 
if hasattr(method, 'kwargs'): wrapped_method.kwargs = method.kwargs.copy() setattr(view, method_name, wrapped_method) return wrapped_method The provided code snippet includes necessary dependencies for implementing the `extend_schema_view` function. Write a Python function `def extend_schema_view(**kwargs) -> Callable[[F], F]` to solve the following problem: Convenience decorator for the "view" kind. Intended for annotating derived view methods that are are not directly present in the view (usually methods like ``list`` or ``retrieve``). Spares you from overriding methods like ``list``, only to perform a super call in the body so that you have have something to attach :func:`@extend_schema <.extend_schema>` to. This decorator also takes care of safely attaching annotations to derived view methods, preventing leakage into unrelated views. This decorator also supports custom DRF ``@action`` with the method name as the key. :param kwargs: method names as argument names and :func:`@extend_schema <.extend_schema>` calls as values Here is the function: def extend_schema_view(**kwargs) -> Callable[[F], F]: """ Convenience decorator for the "view" kind. Intended for annotating derived view methods that are are not directly present in the view (usually methods like ``list`` or ``retrieve``). Spares you from overriding methods like ``list``, only to perform a super call in the body so that you have have something to attach :func:`@extend_schema <.extend_schema>` to. This decorator also takes care of safely attaching annotations to derived view methods, preventing leakage into unrelated views. This decorator also supports custom DRF ``@action`` with the method name as the key. :param kwargs: method names as argument names and :func:`@extend_schema <.extend_schema>` calls as values """ def decorator(view): # special case for @api_view. 
redirect decoration to enclosed WrappedAPIView if callable(view) and hasattr(view, 'cls'): extend_schema_view(**kwargs)(view.cls) return view available_view_methods = get_view_method_names(view) for method_name, method_decorator in kwargs.items(): if method_name not in available_view_methods: warn( f'@extend_schema_view argument "{method_name}" was not found on view ' f'{view.__name__}. method override for "{method_name}" will be ignored.', delayed=view ) continue # the context of derived methods must not be altered, as it belongs to the # other view. create a new context so the schema can be safely stored in the # wrapped_method. view methods that are not derived can be safely altered. if hasattr(method_decorator, '__iter__'): for sub_method_decorator in method_decorator: sub_method_decorator(isolate_view_method(view, method_name)) else: method_decorator(isolate_view_method(view, method_name)) return view return decorator
Convenience decorator for the "view" kind. Intended for annotating derived view methods that are are not directly present in the view (usually methods like ``list`` or ``retrieve``). Spares you from overriding methods like ``list``, only to perform a super call in the body so that you have have something to attach :func:`@extend_schema <.extend_schema>` to. This decorator also takes care of safely attaching annotations to derived view methods, preventing leakage into unrelated views. This decorator also supports custom DRF ``@action`` with the method name as the key. :param kwargs: method names as argument names and :func:`@extend_schema <.extend_schema>` calls as values
2,115
import inspect import sys from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Type, TypeVar, Union from django.utils.functional import Promise from rest_framework.fields import Field, empty from rest_framework.serializers import ListSerializer, Serializer from rest_framework.settings import api_settings from drf_spectacular.drainage import ( error, get_view_method_names, isolate_view_method, set_override, warn, ) from drf_spectacular.types import OpenApiTypes, _KnownPythonTypes The provided code snippet includes necessary dependencies for implementing the `inline_serializer` function. Write a Python function `def inline_serializer(name: str, fields: Dict[str, Field], **kwargs) -> Serializer` to solve the following problem: A helper function to create an inline serializer. Primary use is with :func:`@extend_schema <.extend_schema>`, where one needs an implicit one-off serializer that is not reflected in an actual class. :param name: name of the :param fields: dict with field names as keys and serializer fields as values :param kwargs: optional kwargs for serializer initialization Here is the function: def inline_serializer(name: str, fields: Dict[str, Field], **kwargs) -> Serializer: """ A helper function to create an inline serializer. Primary use is with :func:`@extend_schema <.extend_schema>`, where one needs an implicit one-off serializer that is not reflected in an actual class. :param name: name of the :param fields: dict with field names as keys and serializer fields as values :param kwargs: optional kwargs for serializer initialization """ serializer_class = type(name, (Serializer,), fields) return serializer_class(**kwargs)
A helper function to create an inline serializer. Primary use is with :func:`@extend_schema <.extend_schema>`, where one needs an implicit one-off serializer that is not reflected in an actual class. :param name: name of the :param fields: dict with field names as keys and serializer fields as values :param kwargs: optional kwargs for serializer initialization
2,116
import contextlib import functools import inspect import sys from collections import defaultdict from typing import Any, Callable, DefaultDict, List, Optional, Tuple, TypeVar GENERATOR_STATS = GeneratorStats() def reset_generator_stats() -> None: GENERATOR_STATS.reset()
null
2,117
import contextlib import functools import inspect import sys from collections import defaultdict from typing import Any, Callable, DefaultDict, List, Optional, Tuple, TypeVar GENERATOR_STATS = GeneratorStats() def _get_source_location(obj): try: sourcefile = inspect.getsourcefile(obj) except: # noqa: E722 sourcefile = None try: # This is a rather expensive operation. Only do it when explicitly enabled (CLI) # and cache results to speed up some recurring objects like serializers. lineno = inspect.getsourcelines(obj)[1] if GENERATOR_STATS._trace_lineno else None except: # noqa: E722 lineno = None return sourcefile, lineno The provided code snippet includes necessary dependencies for implementing the `add_trace_message` function. Write a Python function `def add_trace_message(obj)` to solve the following problem: Adds a message to be used as a prefix when emitting warnings and errors. Here is the function: def add_trace_message(obj): """ Adds a message to be used as a prefix when emitting warnings and errors. """ sourcefile, lineno = _get_source_location(obj) GENERATOR_STATS._traces.append((sourcefile, lineno, obj.__name__)) yield GENERATOR_STATS._traces.pop()
Adds a message to be used as a prefix when emitting warnings and errors.
2,118
import contextlib import functools import inspect import sys from collections import defaultdict from typing import Any, Callable, DefaultDict, List, Optional, Tuple, TypeVar F = TypeVar('F', bound=Callable[..., Any]) The provided code snippet includes necessary dependencies for implementing the `cache` function. Write a Python function `def cache(user_function: F) -> F` to solve the following problem: simple polyfill for python < 3.9 Here is the function: def cache(user_function: F) -> F: """ simple polyfill for python < 3.9 """ return functools.lru_cache(maxsize=None)(user_function) # type: ignore
simple polyfill for python < 3.9
2,119
from rest_framework.utils.model_meta import get_field_info from drf_spectacular.drainage import warn from drf_spectacular.extensions import OpenApiSerializerExtension, OpenApiSerializerFieldExtension from drf_spectacular.plumbing import ( ResolvedComponent, build_array_type, build_object_type, follow_field_source, get_doc, ) def build_bbox_schema(): return { "type": "array", "items": {"type": "number"}, "minItems": 4, "maxItems": 4, "example": [12.9721, 77.5933, 12.9721, 77.5933], }
null
2,120
from rest_framework.utils.model_meta import get_field_info from drf_spectacular.drainage import warn from drf_spectacular.extensions import OpenApiSerializerExtension, OpenApiSerializerFieldExtension from drf_spectacular.plumbing import ( ResolvedComponent, build_array_type, build_object_type, follow_field_source, get_doc, ) def build_geo_schema(model_field): from django.contrib.gis.db import models if isinstance(model_field, models.PointField): return build_point_geo_schema() elif isinstance(model_field, models.LineStringField): return build_linestring_geo_schema() elif isinstance(model_field, models.PolygonField): return build_polygon_geo_schema() elif isinstance(model_field, models.MultiPointField): return build_geo_container_schema( "MultiPoint", build_array_type(build_point_schema()) ) elif isinstance(model_field, models.MultiLineStringField): return build_geo_container_schema( "MultiLineString", build_array_type(build_linestring_schema()) ) elif isinstance(model_field, models.MultiPolygonField): return build_geo_container_schema( "MultiPolygon", build_array_type(build_polygon_schema()) ) elif isinstance(model_field, models.GeometryCollectionField): return build_geo_container_schema( "GeometryCollection", build_array_type(build_geometry_geo_schema()) ) elif isinstance(model_field, models.GeometryField): return build_geometry_geo_schema() else: warn("Encountered unknown GIS geometry field") return {} def warn(msg: str, delayed: Any = None) -> None: if delayed: warnings = get_override(delayed, 'warnings', []) warnings.append(msg) set_override(delayed, 'warnings', warnings) else: GENERATOR_STATS.emit(msg, 'warning') def map_geo_field(serializer, geo_field_name): from rest_framework_gis.fields import GeometrySerializerMethodField field = serializer.fields[geo_field_name] if isinstance(field, GeometrySerializerMethodField): warn("Geometry generation for GeometrySerializerMethodField is not supported.") return {} model_field = 
get_field_info(serializer.Meta.model).fields[geo_field_name] return build_geo_schema(model_field)
null
2,121
from rest_framework.utils.model_meta import get_field_info from drf_spectacular.drainage import warn from drf_spectacular.extensions import OpenApiSerializerExtension, OpenApiSerializerFieldExtension from drf_spectacular.plumbing import ( ResolvedComponent, build_array_type, build_object_type, follow_field_source, get_doc, ) spectacular_settings = SpectacularSettings( user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}), # type: ignore defaults=SPECTACULAR_DEFAULTS, # type: ignore import_strings=IMPORT_STRINGS, ) def _inject_enum_collision_fix(collection): from drf_spectacular.settings import spectacular_settings if not collection and 'GisFeatureEnum' not in spectacular_settings.ENUM_NAME_OVERRIDES: spectacular_settings.ENUM_NAME_OVERRIDES['GisFeatureEnum'] = ('Feature',) if collection and 'GisFeatureCollectionEnum' not in spectacular_settings.ENUM_NAME_OVERRIDES: spectacular_settings.ENUM_NAME_OVERRIDES['GisFeatureCollectionEnum'] = ('FeatureCollection',)
null
2,122
from django.conf import settings from django.utils.version import get_version_tuple from rest_framework import serializers from drf_spectacular.contrib.rest_framework_simplejwt import ( SimpleJWTScheme, TokenRefreshSerializerExtension, ) from drf_spectacular.drainage import warn from drf_spectacular.extensions import OpenApiSerializerExtension, OpenApiViewExtension from drf_spectacular.utils import extend_schema def get_dj_rest_auth_setting(class_name, setting_name): from dj_rest_auth.__version__ import __version__ if get_version_tuple(__version__) < (3, 0, 0): from dj_rest_auth import app_settings return getattr(app_settings, class_name) else: from dj_rest_auth.app_settings import api_settings return getattr(api_settings, setting_name) def get_token_serializer_class(): from dj_rest_auth.__version__ import __version__ if get_version_tuple(__version__) < (3, 0, 0): use_jwt = getattr(settings, 'REST_USE_JWT', False) else: from dj_rest_auth.app_settings import api_settings use_jwt = api_settings.USE_JWT if use_jwt: return get_dj_rest_auth_setting('JWTSerializer', 'JWT_SERIALIZER') else: return get_dj_rest_auth_setting('TokenSerializer', 'TOKEN_SERIALIZER')
null
2,123
import re from typing import Optional from django.utils.module_loading import import_string def camelize_serializer_fields(result, generator, request, public): from django.conf import settings from djangorestframework_camel_case.settings import api_settings from djangorestframework_camel_case.util import camelize_re, underscore_to_camel # prunes subtrees from camelization based on owning field name ignore_fields = api_settings.JSON_UNDERSCOREIZE.get("ignore_fields") or () # ignore certain field names while camelizing ignore_keys = api_settings.JSON_UNDERSCOREIZE.get("ignore_keys") or () def has_middleware_installed(): try: from djangorestframework_camel_case.middleware import CamelCaseMiddleWare except ImportError: return False for middleware in [import_string(m) for m in settings.MIDDLEWARE]: try: if issubclass(CamelCaseMiddleWare, middleware): return True except TypeError: pass def camelize_str(key: str) -> str: new_key = re.sub(camelize_re, underscore_to_camel, key) if "_" in key else key if key in ignore_keys or new_key in ignore_keys: return key return new_key def camelize_component(schema: dict, name: Optional[str] = None) -> dict: if name is not None and (name in ignore_fields or camelize_str(name) in ignore_fields): return schema elif schema.get('type') == 'object': if 'properties' in schema: schema['properties'] = { camelize_str(field_name): camelize_component(field_schema, field_name) for field_name, field_schema in schema['properties'].items() } if 'required' in schema: schema['required'] = [camelize_str(field) for field in schema['required']] elif schema.get('type') == 'array': camelize_component(schema['items']) return schema for (_, component_type), component in generator.registry._components.items(): if component_type == 'schemas': camelize_component(component.schema) if has_middleware_installed(): for url_schema in result["paths"].values(): for method_schema in url_schema.values(): for parameter in method_schema.get("parameters", []): 
parameter["name"] = camelize_str(parameter["name"]) # inplace modification of components also affect result dict, so regeneration is not necessary return result
null
2,124
from django.core.checks import Error, Warning, register GENERATOR_STATS = GeneratorStats() spectacular_settings = SpectacularSettings( user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}), # type: ignore defaults=SPECTACULAR_DEFAULTS, # type: ignore import_strings=IMPORT_STRINGS, ) The provided code snippet includes necessary dependencies for implementing the `schema_check` function. Write a Python function `def schema_check(app_configs, **kwargs)` to solve the following problem: Perform dummy generation and emit warnings/errors as part of Django's check framework Here is the function: def schema_check(app_configs, **kwargs): """ Perform dummy generation and emit warnings/errors as part of Django's check framework """ from drf_spectacular.drainage import GENERATOR_STATS from drf_spectacular.settings import spectacular_settings if not spectacular_settings.ENABLE_DJANGO_DEPLOY_CHECK: return [] errors = [] try: with GENERATOR_STATS.silence(): spectacular_settings.DEFAULT_GENERATOR_CLASS().get_schema(request=None, public=True) except Exception as exc: errors.append( Error(f'Schema generation threw exception "{exc}"', id='drf_spectacular.E001') ) if GENERATOR_STATS: for w in GENERATOR_STATS._warn_cache: errors.append(Warning(w, id='drf_spectacular.W001')) for e in GENERATOR_STATS._error_cache: errors.append(Warning(e, id='drf_spectacular.W002')) return errors
Perform dummy generation and emit warnings/errors as part of Django's check framework
2,125
import re
from collections import defaultdict

from inflection import camelize
from rest_framework.settings import api_settings

from drf_spectacular.drainage import warn
from drf_spectacular.plumbing import (
    ResolvedComponent, list_hash, load_enum_name_overrides, safe_ref,
)
from drf_spectacular.settings import spectacular_settings


def postprocess_schema_enum_id_removal(result, generator, **kwargs):
    """
    Iterative modifying approach to scanning the whole schema and removing
    the temporary helper ids that allowed us to distinguish similar enums.
    """
    def clean(sub_result):
        # depth-first in-place walk over nested dicts/lists/tuples
        if isinstance(sub_result, dict):
            # iterate over a snapshot of the keys since we delete while walking
            for key in list(sub_result):
                if key == 'x-spec-enum-id':
                    del sub_result['x-spec-enum-id']
                else:
                    clean(sub_result[key])
        elif isinstance(sub_result, (list, tuple)):
            for item in sub_result:
                clean(item)

    clean(result)
    return result


def warn(msg: str, delayed: Any = None) -> None:
    # Either attach the warning to a "delayed" carrier object (emitted later by
    # whoever processes its overrides) or emit immediately via the stats collector.
    if delayed:
        warnings = get_override(delayed, 'warnings', [])
        warnings.append(msg)
        set_override(delayed, 'warnings', warnings)
    else:
        GENERATOR_STATS.emit(msg, 'warning')


def safe_ref(schema: _SchemaType) -> _SchemaType:
    """
    ensure that $ref has its own context and does not remove potential sibling entries when
    $ref is substituted. also remove useless singular "allOf" .
    """
    if '$ref' in schema and len(schema) > 1:
        # siblings next to $ref are ignored per spec — wrap the $ref in allOf
        return {'allOf': [{'$ref': schema.pop('$ref')}], **schema}
    if 'allOf' in schema and len(schema) == 1 and len(schema['allOf']) == 1:
        # unwrap a pointless single-entry allOf
        return schema['allOf'][0]
    return schema


class ResolvedComponent:
    # component "type" identifiers — the section names under '#/components/'
    # OpenAPI 3.0.3
    SCHEMA = 'schemas'
    RESPONSE = 'responses'
    PARAMETER = 'parameters'
    EXAMPLE = 'examples'
    REQUEST_BODY = 'requestBodies'
    HEADER = 'headers'
    SECURITY_SCHEMA = 'securitySchemes'
    LINK = 'links'
    CALLBACK = 'callbacks'
    # OpenAPI 3.1.0+
    PATH_ITEM = 'pathItems'

    def __init__(self, name, type, schema=None, object=None):
        self.name = name
        self.type = type
        self.schema = schema
        self.object = object

    def __bool__(self):
        # a component counts as resolved once name, type and backing object are set
        return bool(self.name and self.type and self.object)

    def key(self) -> Tuple[str, str]:
        return self.name, self.type

    # NOTE(review): postprocess_schema_enums accesses `component.ref` as an
    # attribute (a dict), which suggests `ref` (and likely `key`) were
    # @property upstream and the decorators were lost in this excerpt — confirm.
    def ref(self) -> _SchemaType:
        assert self.__bool__()
        return {'$ref': f'#/components/{self.type}/{self.name}'}


def load_enum_name_overrides():
    # Build a mapping of choice-set hash -> user-chosen name from
    # ENUM_NAME_OVERRIDES so the enum postprocessing hook can apply overrides.
    overrides = {}
    for name, choices in spectacular_settings.ENUM_NAME_OVERRIDES.items():
        if isinstance(choices, str):
            # string override: dotted import path to the actual choices object
            choices = deep_import_string(choices)
        if not choices:
            warn(
                f'unable to load choice override for {name} from ENUM_NAME_OVERRIDES. '
                f'please check module path string.'
            )
            continue
        if inspect.isclass(choices) and issubclass(choices, Choices):
            choices = choices.choices
        if inspect.isclass(choices) and issubclass(choices, Enum):
            choices = [(c.value, c.name) for c in choices]
        normalized_choices = []
        for choice in choices:
            # Allow None values in the simple values list case
            if isinstance(choice, str) or choice is None:
                # TODO warning
                normalized_choices.append((choice, choice))  # simple choice list
            elif isinstance(choice[1], (list, tuple)):
                normalized_choices.extend(choice[1])  # categorized nested choices
            else:
                normalized_choices.append(choice)  # normal 2-tuple form
        # Get all of choice values that should be used in the hash, blank and None values get excluded
        # in the post-processing hook for enum overrides, so we do the same here to ensure the hashes match
        hashable_values = [
            (value, label) for value, label in normalized_choices if value not in ['', None]
        ]
        overrides[list_hash(hashable_values)] = name

    if len(spectacular_settings.ENUM_NAME_OVERRIDES) != len(overrides):
        error(
            'ENUM_NAME_OVERRIDES has duplication issues. Encountered multiple names '
            'for the same choice set. Enum naming might be unexpected.'
        )
    return overrides


def list_hash(lst: Any) -> str:
    # stable 16-hex-char digest of a (nested) list — used as a choice-set identity
    return hashlib.sha256(json.dumps(list(lst), sort_keys=True, cls=JSONEncoder).encode()).hexdigest()[:16]


spectacular_settings = SpectacularSettings(
    user_settings=getattr(settings, 'SPECTACULAR_SETTINGS', {}),  # type: ignore
    defaults=SPECTACULAR_DEFAULTS,  # type: ignore
    import_strings=IMPORT_STRINGS,
)
def postprocess_schema_enums(result, generator, **kwargs):
    """
    simple replacement of Enum/Choices that globally share the same name and have
    the same choices. Aids client generation to not generate a separate enum for
    every occurrence. only takes effect when replacement is guaranteed to be correct.
    """
    def iter_prop_containers(schema, component_name=None):
        # recursively yield (component name, properties dict) pairs, descending
        # into oneOf/allOf/anyOf compositions
        if not component_name:
            for component_name, schema in schema.items():
                # strip split-component affixes so Patched/Request variants share names
                if spectacular_settings.COMPONENT_SPLIT_PATCH:
                    component_name = re.sub('^Patched(.+)', r'\1', component_name)
                if spectacular_settings.COMPONENT_SPLIT_REQUEST:
                    component_name = re.sub('(.+)Request$', r'\1', component_name)
                yield from iter_prop_containers(schema, component_name)
        elif isinstance(schema, list):
            for item in schema:
                yield from iter_prop_containers(item, component_name)
        elif isinstance(schema, dict):
            if schema.get('properties'):
                yield component_name, schema['properties']
            yield from iter_prop_containers(schema.get('oneOf', []), component_name)
            yield from iter_prop_containers(schema.get('allOf', []), component_name)
            yield from iter_prop_containers(schema.get('anyOf', []), component_name)

    def create_enum_component(name, schema):
        # register a standalone enum schema component and return its handle
        component = ResolvedComponent(
            name=name,
            type=ResolvedComponent.SCHEMA,
            schema=schema,
            object=name,
        )
        generator.registry.register_on_missing(component)
        return component

    def extract_hash(schema):
        if 'x-spec-enum-id' in schema:
            # try to use the injected enum hash first as it generated from (name, value) tuples,
            # which prevents collisions on choice sets only differing in labels not values.
            return schema['x-spec-enum-id']
        else:
            # fall back to actual list hashing when we encounter enums not generated by us.
            # remove blank/null entry for hashing. will be reconstructed in the last step
            return list_hash([(i, i) for i in schema['enum'] if i not in ('', None)])

    schemas = result.get('components', {}).get('schemas', {})

    overrides = load_enum_name_overrides()

    prop_hash_mapping = defaultdict(set)
    hash_name_mapping = defaultdict(set)
    # collect all enums, their names and choice sets
    for component_name, props in iter_prop_containers(schemas):
        for prop_name, prop_schema in props.items():
            if prop_schema.get('type') == 'array':
                prop_schema = prop_schema.get('items', {})
            if 'enum' not in prop_schema:
                continue
            prop_enum_cleaned_hash = extract_hash(prop_schema)
            prop_hash_mapping[prop_name].add(prop_enum_cleaned_hash)
            hash_name_mapping[prop_enum_cleaned_hash].add((component_name, prop_name))

    # get the suffix to be used for enums from settings
    enum_suffix = spectacular_settings.ENUM_SUFFIX
    # traverse all enum properties and generate a name for the choice set. naming collisions
    # are resolved and a warning is emitted. giving a choice set multiple names is technically
    # correct but potentially unwanted. also emit a warning there to make the user aware.
    enum_name_mapping = {}
    for prop_name, prop_hash_set in prop_hash_mapping.items():
        for prop_hash in prop_hash_set:
            if prop_hash in overrides:
                enum_name = overrides[prop_hash]
            elif len(prop_hash_set) == 1:
                # prop_name has been used exclusively for one choice set (best case)
                enum_name = f'{camelize(prop_name)}{enum_suffix}'
            elif len(hash_name_mapping[prop_hash]) == 1:
                # prop_name has multiple choice sets, but each one limited to one component only
                component_name, _ = next(iter(hash_name_mapping[prop_hash]))
                enum_name = f'{camelize(component_name)}{camelize(prop_name)}{enum_suffix}'
            else:
                # last resort: disambiguate with a 3-char hash prefix and warn
                enum_name = f'{camelize(prop_name)}{prop_hash[:3].capitalize()}{enum_suffix}'
                warn(
                    f'enum naming encountered a non-optimally resolvable collision for fields '
                    f'named "{prop_name}". The same name has been used for multiple choice sets '
                    f'in multiple components. The collision was resolved with "{enum_name}". '
                    f'add an entry to ENUM_NAME_OVERRIDES to fix the naming.'
                )
            if enum_name_mapping.get(prop_hash, enum_name) != enum_name:
                warn(
                    f'encountered multiple names for the same choice set ({enum_name}). This '
                    f'may be unwanted even though the generated schema is technically correct. '
                    f'Add an entry to ENUM_NAME_OVERRIDES to fix the naming.'
                )
                del enum_name_mapping[prop_hash]
            else:
                enum_name_mapping[prop_hash] = enum_name
            enum_name_mapping[(prop_hash, prop_name)] = enum_name

    # replace all enum occurrences with a enum schema component. cut out the
    # enum, replace it with a reference and add a corresponding component.
    for _, props in iter_prop_containers(schemas):
        for prop_name, prop_schema in props.items():
            is_array = prop_schema.get('type') == 'array'
            if is_array:
                prop_schema = prop_schema.get('items', {})

            if 'enum' not in prop_schema:
                continue

            prop_enum_original_list = prop_schema['enum']
            prop_schema['enum'] = [i for i in prop_schema['enum'] if i not in ['', None]]
            prop_hash = extract_hash(prop_schema)
            # when choice sets are reused under multiple names, the generated name cannot be
            # resolved from the hash alone. fall back to prop_name and hash for resolution.
            enum_name = enum_name_mapping.get(prop_hash) or enum_name_mapping[prop_hash, prop_name]

            # split property into remaining property and enum component parts
            enum_schema = {k: v for k, v in prop_schema.items() if k in ['type', 'enum']}
            prop_schema = {k: v for k, v in prop_schema.items() if k not in ['type', 'enum', 'x-spec-enum-id']}

            # separate actual description from name-value tuples
            if spectacular_settings.ENUM_GENERATE_CHOICE_DESCRIPTION:
                if prop_schema.get('description', '').startswith('*'):
                    enum_schema['description'] = prop_schema.pop('description')
                elif '\n\n*' in prop_schema.get('description', ''):
                    _, _, post = prop_schema['description'].partition('\n\n*')
                    enum_schema['description'] = '*' + post

            components = [
                create_enum_component(enum_name, schema=enum_schema)
            ]
            if spectacular_settings.ENUM_ADD_EXPLICIT_BLANK_NULL_CHOICE:
                if '' in prop_enum_original_list:
                    components.append(create_enum_component(f'Blank{enum_suffix}', schema={'enum': ['']}))
                if None in prop_enum_original_list:
                    if spectacular_settings.OAS_VERSION.startswith('3.1'):
                        components.append(create_enum_component(f'Null{enum_suffix}', schema={'type': 'null'}))
                    else:
                        components.append(create_enum_component(f'Null{enum_suffix}', schema={'enum': [None]}))

            # undo OAS 3.1 type list NULL construction as we cover this in a separate component already
            if spectacular_settings.OAS_VERSION.startswith('3.1') and isinstance(enum_schema['type'], list):
                enum_schema['type'] = [t for t in enum_schema['type'] if t != 'null'][0]

            if len(components) == 1:
                prop_schema.update(components[0].ref)
            else:
                prop_schema.update({'oneOf': [c.ref for c in components]})

            # write back, keeping the array wrapper when the enum sat in items
            if is_array:
                props[prop_name]['items'] = safe_ref(prop_schema)
            else:
                props[prop_name] = safe_ref(prop_schema)

    # sort again with additional components
    result['components'] = generator.registry.build(spectacular_settings.APPEND_COMPONENTS)

    # remove remaining ids that were not part of this hook (operation parameters mainly)
    postprocess_schema_enum_id_removal(result, generator)

    return result
simple replacement of Enum/Choices that globally share the same name and have the same choices. Aids client generation to not generate a separate enum for every occurrence. only takes effect when replacement is guaranteed to be correct.
2,126
def preprocess_exclude_path_format(endpoints, **kwargs):
    """
    preprocessing hook that filters out {format} suffixed paths, in case
    format_suffix_patterns is used and {format} path params are unwanted.
    """
    # the literal path fragment produced by format_suffix_patterns, e.g. "{format}"
    suffix = '{%s}' % api_settings.FORMAT_SUFFIX_KWARG
    kept = []
    for path, path_regex, method, callback in endpoints:
        # drop endpoints whose path ends in the format suffix (with or without slash)
        if path.endswith((suffix, suffix + '/')):
            continue
        kept.append((path, path_regex, method, callback))
    return kept
preprocessing hook that filters out {format} suffixed paths, in case format_suffix_patterns is used and {format} path params are unwanted.
2,127
@contextmanager
def patched_settings(patches):
    """
    temporarily patch the global spectacular settings (or do nothing)

    :param patches: mapping of setting names to temporary values; a falsy value
        makes the context a no-op pass-through.
    """
    # Bug fix: this generator is consumed via `with patched_settings(...)`, which
    # requires the @contextmanager decorator; without it, `with` would fail with
    # AttributeError (no __enter__/__exit__) at runtime.
    if not patches:
        # nothing to patch: plain pass-through context
        yield
    else:
        try:
            # apply inside try so patches are rolled back even if application
            # itself fails halfway through
            spectacular_settings.apply_patches(patches)
            yield
        finally:
            spectacular_settings.clear_patches()
temporarily patch the global spectacular settings (or do nothing)
2,128
def lazy_serializer(path: str):
    """
    Simulate an already-initialized object, but actually import the class and
    initialize it on first attribute access.
    """
    class LazySerializer:
        def __init__(self, *args, **kwargs):
            # remember constructor arguments; defer the real instantiation
            self.lazy_args = args
            self.lazy_kwargs = kwargs
            self.lazy_obj = None

        def __getattr__(self, item):
            # only invoked for attributes missing on this proxy: materialize the
            # real object on demand, then delegate the lookup to it
            if not self.lazy_obj:
                self.lazy_obj = import_string(path)(*self.lazy_args, **self.lazy_kwargs)
            return getattr(self.lazy_obj, item)

        # forward identity/introspection dunders to the materialized object so
        # the proxy is indistinguishable from the real thing
        @property  # type: ignore
        def __class__(self):
            return self.__getattr__('__class__')

        @property
        def __dict__(self):
            return self.__getattr__('__dict__')

        def __str__(self):
            return self.__getattr__('__str__')()

        def __repr__(self):
            return self.__getattr__('__repr__')()

    return LazySerializer
simulate an already-initialized object, but actually import the class and initialize it on first attribute access
2,129
from django.utils.module_loading import import_string


def set_override(obj: Any, prop: str, value: Any) -> Any:
    # Store `value` under `prop` in the object's spectacular annotation dict,
    # copying an inherited dict first so base classes are never mutated.
    if not hasattr(obj, '_spectacular_annotation'):
        obj._spectacular_annotation = {}
    elif '_spectacular_annotation' not in obj.__dict__:
        # annotation dict came from a base class: copy before mutating
        obj._spectacular_annotation = obj._spectacular_annotation.copy()
    obj._spectacular_annotation[prop] = value
    return obj


def extend_schema_serializer(
    many: Optional[bool] = None,
    exclude_fields: Optional[Sequence[str]] = None,
    deprecate_fields: Optional[Sequence[str]] = None,
    examples: Optional[Sequence[OpenApiExample]] = None,
    extensions: Optional[Dict[str, Any]] = None,
    component_name: Optional[str] = None,
) -> Callable[[F], F]:
    """
    Decorator for the "serializer" kind. Intended for overriding default serializer behaviour that
    cannot be influenced through :func:`@extend_schema <.extend_schema>`.

    :param many: override how serializer is initialized. Mainly used to coerce the list view detection
        heuristic to acknowledge a non-list serializer.
    :param exclude_fields: fields to ignore while processing the serializer. only affects the
        schema. fields will still be exposed through the API.
    :param deprecate_fields: fields to mark as deprecated while processing the serializer.
    :param examples: define example data to serializer.
    :param extensions: specification extensions, e.g. ``x-is-dynamic``, etc.
    :param component_name: override default class name extraction.
    """
    def decorator(klass):
        # record each given override on the decorated serializer class
        if many is not None:
            set_override(klass, 'many', many)
        if exclude_fields:
            set_override(klass, 'exclude_fields', exclude_fields)
        if deprecate_fields:
            set_override(klass, 'deprecate_fields', deprecate_fields)
        if examples:
            set_override(klass, 'examples', examples)
        if extensions:
            set_override(klass, 'extensions', extensions)
        if component_name:
            set_override(klass, 'component_name', component_name)
        return klass
    return decorator


def forced_singular_serializer(serializer_class):
    # Build a same-named subclass that is always treated as singular (many=False)
    # by the schema generator, with collision warnings suppressed.
    from drf_spectacular.drainage import set_override
    from drf_spectacular.utils import extend_schema_serializer

    patched_serializer_class = type(serializer_class.__name__, (serializer_class,), {})
    extend_schema_serializer(many=False)(patched_serializer_class)
    set_override(patched_serializer_class, 'suppress_collision_warning', True)
    return patched_serializer_class
null
2,130
def _get_sidecar_url(filepath):
    # Resolve a file path inside the bundled drf-spectacular-sidecar static
    # distribution to a servable static URL.
    return static('drf_spectacular_sidecar/' + filepath)
null
2,131
def parse_error(status_code: int, payload: Dict[str, str]) -> Exception:
    """
    Parse error given an HTTP status code and a json payload

    Args:
        status_code (`int`):
            HTTP status code
        payload (`Dict[str, str]`):
            Json payload

    Returns:
        Exception: parsed exception
    """
    message = payload["error"]

    # Text Generation Inference errors carry an explicit error_type
    error_type = payload.get("error_type")
    if error_type == "generation":
        return GenerationError(message)
    if error_type == "incomplete_generation":
        return IncompleteGenerationError(message)
    if error_type == "overloaded":
        return OverloadedError(message)
    if error_type == "validation":
        return ValidationError(message)

    # APIInference errors are identified by HTTP status code alone
    status_map = {
        400: BadRequestError,
        403: ShardNotReadyError,
        424: ShardNotReadyError,
        504: ShardTimeoutError,
        404: NotFoundError,
        429: RateLimitExceededError,
    }
    # Fallback to an unknown error when nothing matched
    exception_class = status_map.get(status_code, UnknownError)
    return exception_class(message)
def deployed_models(headers: Optional[Dict] = None) -> List[DeployedModel]:
    """
    Get all currently deployed models with text-generation-inference-support

    Returns:
        List[DeployedModel]: list of all currently deployed models
    """
    # single registry endpoint that lists every TGI-served model
    response = requests.get(
        "https://api-inference.huggingface.co/framework/text-generation-inference",
        headers=headers,
        timeout=5,
    )
    payload = response.json()
    if response.status_code != 200:
        # non-200 payloads carry a structured error description
        raise parse_error(response.status_code, payload)
    return [DeployedModel(**entry) for entry in payload]
Get all currently deployed models with text-generation-inference-support Returns: List[DeployedModel]: list of all currently deployed models
2,132
import os import requests from typing import Dict, Optional, List from huggingface_hub.utils import build_hf_headers from text_generation import Client, AsyncClient, __version__ from text_generation.types import DeployedModel from text_generation.errors import NotSupportedError, parse_error def parse_error(status_code: int, payload: Dict[str, str]) -> Exception: """ Parse error given an HTTP status code and a json payload Args: status_code (`int`): HTTP status code payload (`Dict[str, str]`): Json payload Returns: Exception: parsed exception """ # Try to parse a Text Generation Inference error message = payload["error"] if "error_type" in payload: error_type = payload["error_type"] if error_type == "generation": return GenerationError(message) if error_type == "incomplete_generation": return IncompleteGenerationError(message) if error_type == "overloaded": return OverloadedError(message) if error_type == "validation": return ValidationError(message) # Try to parse a APIInference error if status_code == 400: return BadRequestError(message) if status_code == 403 or status_code == 424: return ShardNotReadyError(message) if status_code == 504: return ShardTimeoutError(message) if status_code == 404: return NotFoundError(message) if status_code == 429: return RateLimitExceededError(message) # Fallback to an unknown error return UnknownError(message) The provided code snippet includes necessary dependencies for implementing the `check_model_support` function. 
def check_model_support(repo_id: str, headers: Optional[Dict] = None) -> bool:
    """
    Check if a given model is supported by text-generation-inference

    Returns:
        bool: whether the model is supported by this client
    """
    status_url = f"https://api-inference.huggingface.co/status/{repo_id}"
    response = requests.get(status_url, headers=headers, timeout=5)
    payload = response.json()
    if response.status_code != 200:
        raise parse_error(response.status_code, payload)
    # the status endpoint reports which serving framework backs the repo
    return payload["framework"] == "text-generation-inference"
Check if a given model is supported by text-generation-inference Returns: bool: whether the model is supported by this client
2,133
import os
import sys
import typer
from pathlib import Path
from loguru import logger
from typing import Optional
from enum import Enum
from huggingface_hub import hf_hub_download


class Quantization(str, Enum):
    # quantization backends accepted by the `--quantize` CLI option
    bitsandbytes = "bitsandbytes"
    bitsandbytes_nf4 = "bitsandbytes-nf4"
    bitsandbytes_fp4 = "bitsandbytes-fp4"
    gptq = "gptq"
    awq = "awq"
    eetq = "eetq"


class Dtype(str, Enum):
    # compute dtypes accepted by the `--dtype` CLI option
    float16 = "float16"
    # NOTE(review): member is spelled "bloat16" while the value is "bfloat16" —
    # looks like a typo kept for compatibility; confirm before renaming.
    bloat16 = "bfloat16"


def setup_tracing(shard: int, otlp_endpoint: str):
    # Configure an OpenTelemetry tracer provider exporting this shard's spans
    # to the given OTLP endpoint.
    # NOTE(review): Resource/OTLPSpanExporter/BatchSpanProcessor/trace/
    # TracerProvider are not imported in this excerpt — presumably imported in
    # the full module; verify.
    resource = Resource.create(
        attributes={"service.name": f"text-generation-inference.server-{shard}"}
    )
    span_exporter = OTLPSpanExporter(endpoint=otlp_endpoint, insecure=True)
    span_processor = BatchSpanProcessor(span_exporter)
    trace.set_tracer_provider(TracerProvider(resource=resource))
    trace.get_tracer_provider().add_span_processor(span_processor)


def serve(
    model_id: str,
    revision: Optional[str] = None,
    sharded: bool = False,
    quantize: Optional[Quantization] = None,
    speculate: Optional[int] = None,
    dtype: Optional[Dtype] = None,
    trust_remote_code: bool = False,
    # NOTE(review): default is a str although annotated Path — presumably
    # downstream accepts both; confirm.
    uds_path: Path = "/tmp/text-generation-server",
    logger_level: str = "INFO",
    json_output: bool = False,
    otlp_endpoint: Optional[str] = None,
):
    """
    CLI entry point: validate the sharding environment, configure logging and
    (optionally) OpenTelemetry tracing, then launch the gRPC generation server
    for `model_id` via server.serve().
    """
    # sharded runs rely on torch.distributed-style environment variables
    if sharded:
        assert (
            os.getenv("RANK", None) is not None
        ), "RANK must be set when sharded is True"
        assert (
            os.getenv("WORLD_SIZE", None) is not None
        ), "WORLD_SIZE must be set when sharded is True"
        assert (
            os.getenv("MASTER_ADDR", None) is not None
        ), "MASTER_ADDR must be set when sharded is True"
        assert (
            os.getenv("MASTER_PORT", None) is not None
        ), "MASTER_PORT must be set when sharded is True"

    # Remove default handler
    logger.remove()
    logger.add(
        sys.stdout,
        format="{message}",
        filter="text_generation_server",
        level=logger_level,
        serialize=json_output,
        backtrace=True,
        diagnose=False,
    )

    # Import here after the logger is added to log potential import exceptions
    from text_generation_server import server
    from text_generation_server.tracing import setup_tracing

    # Setup OpenTelemetry distributed tracing
    if otlp_endpoint is not None:
        setup_tracing(shard=os.getenv("RANK", 0), otlp_endpoint=otlp_endpoint)

    # Downgrade enum into str for easier management later on
    quantize = None if quantize is None else quantize.value
    dtype = None if dtype is None else dtype.value
    # dtype can only be combined with the bitsandbytes family (or no quantization)
    if dtype is not None and quantize not in {
        None,
        "bitsandbytes",
        "bitsandbytes-nf4",
        "bitsandbytes-fp4",
    }:
        raise RuntimeError(
            "Only 1 can be set between `dtype` and `quantize`, as they both decide how goes the final model."
        )
    server.serve(
        model_id,
        revision,
        sharded,
        quantize,
        speculate,
        dtype,
        trust_remote_code,
        uds_path,
    )
null
2,134
import os
import sys
import typer
from pathlib import Path
from loguru import logger
from typing import Optional
from enum import Enum
from huggingface_hub import hf_hub_download


def download_weights(
    model_id: str,
    revision: Optional[str] = None,
    extension: str = ".safetensors",
    auto_convert: bool = True,
    logger_level: str = "INFO",
    json_output: bool = False,
    trust_remote_code: bool = False,
):
    # NOTE(review): the body of download_weights was elided by the extraction of
    # this excerpt; `...` inserted so the stub parses. The real implementation
    # lives in the full module.
    ...


def quantize(
    model_id: str,
    output_dir: str,
    revision: Optional[str] = None,
    logger_level: str = "INFO",
    json_output: bool = False,
    trust_remote_code: bool = False,
    upload_to_model_id: Optional[str] = None,
    percdamp: float = 0.01,
    act_order: bool = False,
):
    # CLI command: fetch the model weights, then GPTQ-quantize them to 4 bit
    # (group size 128) into `output_dir`, optionally uploading the result.
    if revision is None:
        revision = "main"
    download_weights(
        model_id=model_id,
        revision=revision,
        logger_level=logger_level,
        json_output=json_output,
    )
    # local import deliberately shadows this function's own name: the actual
    # quantization routine is text_generation_server.utils.gptq.quantize.quantize
    from text_generation_server.utils.gptq.quantize import quantize

    quantize(
        model_id=model_id,
        bits=4,
        groupsize=128,
        output_dir=output_dir,
        revision=revision,
        trust_remote_code=trust_remote_code,
        upload_to_model_id=upload_to_model_id,
        percdamp=percdamp,
        act_order=act_order,
    )
null
2,135
import asyncio import os import torch import time from grpc import aio from loguru import logger from grpc_reflection.v1alpha import reflection from pathlib import Path from typing import List, Optional from text_generation_server.cache import Cache from text_generation_server.interceptor import ExceptionInterceptor from text_generation_server.models import Model, get_model from text_generation_server.pb import generate_pb2_grpc, generate_pb2 from text_generation_server.tracing import UDSOpenTelemetryAioServerInterceptor from text_generation_server.models.idefics_causal_lm import IdeficsCausalLMBatch class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer): def __init__( self, model: Model, cache: Cache, quantize: Optional[str], server_urls: List[str], ): self.cache = cache self.model = model self.quantize = quantize self.server_urls = server_urls # For some reason, inference_mode does not work well with GLOO which we use on CPU if model.device.type == "cuda": # Force inference mode for the lifetime of TextGenerationService self._inference_mode_raii_guard = torch._C._InferenceMode(True) async def Info(self, request, context): return self.model.info async def Health(self, request, context): if self.model.device.type == "cuda": torch.zeros((2, 2)).cuda() return generate_pb2.HealthResponse() async def ServiceDiscovery(self, request, context): return generate_pb2.ServiceDiscoveryResponse(urls=self.server_urls) async def ClearCache(self, request, context): if request.HasField("id"): self.cache.delete(request.id) else: self.cache.clear() return generate_pb2.ClearCacheResponse() async def FilterBatch(self, request, context): batch = self.cache.pop(request.batch_id) if batch is None: raise ValueError(f"Batch ID {request.batch_id} not found in cache.") filtered_batch = batch.filter(request.request_ids) self.cache.set(filtered_batch) return generate_pb2.FilterBatchResponse(batch=filtered_batch.to_pb()) async def Warmup(self, request, context): if 
self.quantize == "gptq": try: # When using GPTQ, Exllama kernels need some global kernels # For which we have the finale shapes only after the model has loaded # This will allocate those buffers. from text_generation_server.utils.layers import ( create_exllama_buffers, set_device, ) set_device(self.model.device) create_exllama_buffers(request.max_prefill_tokens) except ImportError: pass if ( self.model.batch_type == IdeficsCausalLMBatch ): # Hack, i would rather use kwargs in the `from_pb` call batch = self.model.batch_type.from_pb( request.batch, self.model.tokenizer, self.model.processor, self.model.dtype, self.model.device, ) else: batch = self.model.batch_type.from_pb( request.batch, self.model.tokenizer, self.model.dtype, self.model.device ) max_supported_total_tokens = self.model.warmup(batch) return generate_pb2.WarmupResponse( max_supported_total_tokens=max_supported_total_tokens ) async def Prefill(self, request, context): start = time.time_ns() if ( self.model.batch_type == IdeficsCausalLMBatch ): # Hack, i would rather use kwargs in the `from_pb` call batch = self.model.batch_type.from_pb( request.batch, self.model.tokenizer, self.model.processor, self.model.dtype, self.model.device, ) else: batch = self.model.batch_type.from_pb( request.batch, self.model.tokenizer, self.model.dtype, self.model.device ) generations, next_batch, timings = self.model.generate_token(batch) self.cache.set(next_batch) return generate_pb2.PrefillResponse( generations=[generation.to_pb() for generation in generations], batch=next_batch.to_pb() if next_batch else None, forward_ns=timings[0], decode_ns=timings[1], total_ns=time.time_ns() - start, ) async def Decode(self, request, context): start = time.time_ns() if len(request.batches) == 0: raise ValueError("Must provide at least one batch") batches = [] for batch_pb in request.batches: batch = self.cache.pop(batch_pb.id) if batch is None: raise ValueError(f"Batch ID {batch_pb.id} not found in cache.") batches.append(batch) if 
len(batches) == 0: raise ValueError("All batches are empty") if len(batches) > 1: start_concat = time.time_ns() batch = self.model.batch_type.concatenate(batches) concat_ns = time.time_ns() - start_concat else: batch = batches[0] concat_ns = None generations, next_batch, timings = self.model.generate_token(batch) self.cache.set(next_batch) return generate_pb2.DecodeResponse( generations=[generation.to_pb() for generation in generations], batch=next_batch.to_pb() if next_batch else None, concat_ns=concat_ns, forward_ns=timings[0], decode_ns=timings[1], total_ns=time.time_ns() - start, ) class Cache: def __init__(self): self.cache: Dict[int, B] = {} def pop(self, batch_id: int) -> Optional[B]: return self.cache.pop(batch_id, None) def set(self, entry: B): if entry is not None: self.cache[entry.batch_id] = entry def delete(self, batch_id: int): batch = self.pop(batch_id) if batch is not None: del batch if torch.cuda.is_available(): torch.cuda.empty_cache() def clear(self): keys = list(self.cache.keys()) for k in keys: self.delete(k) def __len__(self): return len(self.cache.keys()) class ExceptionInterceptor(AsyncServerInterceptor): async def intercept( self, method: Callable, request_or_iterator: Any, context: grpc.ServicerContext, method_name: str, ) -> Any: try: response = method(request_or_iterator, context) return await response except Exception as err: method_name = method_name.split("/")[-1] logger.exception(f"Method {method_name} encountered an error.") if torch.cuda.is_available(): torch.cuda.empty_cache() await context.abort_with_status( rpc_status.to_status( status_pb2.Status(code=code_pb2.INTERNAL, message=str(err)) ) ) def get_model( model_id: str, revision: Optional[str], sharded: bool, quantize: Optional[str], speculate: Optional[int], dtype: Optional[str], trust_remote_code: bool, ) -> Model: if dtype is None: # Keep it as default for now and let # every model resolve their own default dtype. 
dtype = None elif dtype == "float16": dtype = torch.float16 elif dtype == "bfloat16": dtype = torch.bfloat16 else: raise RuntimeError(f"Unknown dtype {dtype}") if speculate is not None: set_speculate(speculate) else: set_speculate(0) config_dict, _ = PretrainedConfig.get_config_dict( model_id, revision=revision, trust_remote_code=trust_remote_code ) use_medusa = None if "medusa_num_heads" in config_dict: medusa_model_id = model_id medusa_revision = revision model_id = config_dict["base_model_name_or_path"] revision = "main" speculate_medusa = config_dict["medusa_num_heads"] if speculate is not None: if speculate > speculate_medusa: raise RuntimeError( "Speculate is set to `{speculate}` but this medusa models only has `{speculate_medusa}` heads, please make them match" ) else: set_speculate(speculate) else: set_speculate(speculate_medusa) config_dict, _ = PretrainedConfig.get_config_dict( model_id, revision=revision, trust_remote_code=trust_remote_code ) is_local = Path(medusa_model_id).exists() if not is_local: medusa_config = hf_hub_download( medusa_model_id, revision=medusa_revision, filename="config.json" ) hf_hub_download( medusa_model_id, revision=medusa_revision, filename="medusa_lm_head.safetensors", ) use_medusa = Path(medusa_config).parent else: use_medusa = Path(medusa_model_id) method = "medusa" else: method = "n-gram" speculate = get_speculate() if speculate > 0: logger.info(f"Using speculation {method} with {speculate} input ids.") model_type = config_dict.get("model_type", None) if model_type is None: # TODO: fix how we determine model type for Mamba if "ssm_cfg" in config_dict: # *only happens in Mamba case model_type = "ssm" else: raise RuntimeError( f"Could not determine model type for {model_id} revision {revision}" ) if model_type == "ssm": return Mamba( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_id.startswith("facebook/galactica"): return GalacticaSharded( model_id, 
revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if ( model_type == "gpt_bigcode" or model_type == "gpt2" and model_id.startswith("bigcode/") ): if FLASH_ATTENTION: return FlashSantacoderSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format("Sharded Santacoder") ) else: return SantaCoder( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "bloom": return BLOOMSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == "mpt": return MPTSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == "gpt_neox": if FLASH_ATTENTION: return FlashNeoXSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: return GPTNeoxSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == "phi": if FLASH_ATTENTION: return FlashPhi( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == "phi-msft": if FLASH_ATTENTION: raise NotImplementedError( "Legacy phi-msft is not supported with Flash Attention" ) else: return Phi( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == 
"llama" or model_type == "baichuan": if FLASH_ATTENTION: return FlashLlama( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Llama")) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "gemma": if FLASH_ATTENTION: return FlashGemma( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma")) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type in ["RefinedWeb", "RefinedWebModel", "falcon"]: if sharded: if FLASH_ATTENTION: if config_dict.get("alibi", False): raise NotImplementedError("sharded is not supported for this model") return FlashRWSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format(f"Sharded Falcon")) else: if FLASH_ATTENTION and not config_dict.get("alibi", False): return FlashRWSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) else: return RW( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "mistral": sliding_window = config_dict.get("sliding_window", -1) if ( (sliding_window is None or sliding_window == -1) and FLASH_ATTENTION ) or HAS_FLASH_ATTN_V2_CUDA: return FlashMistral( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Mistral")) else: return 
CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "mixtral": sliding_window = config_dict.get("sliding_window", -1) if ( (sliding_window is None or sliding_window == -1) and FLASH_ATTENTION ) or HAS_FLASH_ATTN_V2_CUDA: return FlashMixtral( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Mixtral")) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "starcoder2": sliding_window = config_dict.get("sliding_window", -1) if ( (sliding_window is None or sliding_window == -1) and FLASH_ATTENTION ) or HAS_FLASH_ATTN_V2_CUDA: return FlashStarcoder2( model_id, revision, quantize=quantize, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format("Sharded Starcoder2") ) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "qwen2": sliding_window = config_dict.get("sliding_window", -1) if ( (sliding_window is None or sliding_window == -1) and FLASH_ATTENTION ) or HAS_FLASH_ATTN_V2_CUDA: return FlashQwen2( model_id, revision, quantize=quantize, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Qwen2")) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "opt": return OPTSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "t5": return T5Sharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, 
dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "idefics": if FLASH_ATTENTION: return IDEFICSSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics")) if sharded: raise NotImplementedError("sharded is not supported for AutoModel") if quantize == "gptq": raise NotImplementedError( "gptq quantization is not supported for AutoModel, you can try to quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`" ) if quantize == "awq": raise NotImplementedError("awq quantization is not supported for AutoModel") elif (quantize == "bitsandbytes-fp4") or (quantize == "bitsandbytes-nf4"): raise NotImplementedError("4bit quantization is not supported for AutoModel") elif quantize == "eetq": raise NotImplementedError("Eetq quantization is not supported for AutoModel") if model_type in modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type in modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES: return Seq2SeqLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) auto_map = config_dict.get("auto_map", None) if trust_remote_code and auto_map is not None: if "AutoModelForCausalLM" in auto_map.keys(): return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if "AutoModelForSeq2SeqLM" in auto_map.keys(): return Seq2SeqLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) raise ValueError(f"Unsupported model type {model_type}") class UDSOpenTelemetryAioServerInterceptor(OpenTelemetryAioServerInterceptor): def __init__(self): 
super().__init__(trace.get_tracer(__name__)) def _start_span(self, handler_call_details, context, set_status_on_exception=False): """ Rewrite _start_span method to support Unix Domain Socket gRPC contexts """ # standard attributes attributes = { SpanAttributes.RPC_SYSTEM: "grpc", SpanAttributes.RPC_GRPC_STATUS_CODE: grpc.StatusCode.OK.value[0], } # if we have details about the call, split into service and method if handler_call_details.method: service, method = handler_call_details.method.lstrip("/").split("/", 1) attributes.update( { SpanAttributes.RPC_METHOD: method, SpanAttributes.RPC_SERVICE: service, } ) # add some attributes from the metadata metadata = dict(context.invocation_metadata()) if "user-agent" in metadata: attributes["rpc.user_agent"] = metadata["user-agent"] # We use gRPC over a UNIX socket attributes.update({SpanAttributes.NET_TRANSPORT: "unix"}) return self._tracer.start_as_current_span( name=handler_call_details.method, kind=trace.SpanKind.SERVER, attributes=attributes, set_status_on_exception=set_status_on_exception, ) def serve( model_id: str, revision: Optional[str], sharded: bool, quantize: Optional[str], speculate: Optional[int], dtype: Optional[str], trust_remote_code: bool, uds_path: Path, ): async def serve_inner( model_id: str, revision: Optional[str], sharded: bool = False, quantize: Optional[str] = None, speculate: Optional[int] = None, dtype: Optional[str] = None, trust_remote_code: bool = False, ): unix_socket_template = "unix://{}-{}" if sharded: server_urls = [ unix_socket_template.format(uds_path, rank) for rank in range(int(os.environ["WORLD_SIZE"])) ] local_url = server_urls[int(os.environ["RANK"])] else: local_url = unix_socket_template.format(uds_path, 0) server_urls = [local_url] try: model = get_model( model_id, revision, sharded, quantize, speculate, dtype, trust_remote_code, ) except Exception: logger.exception("Error when initializing model") raise server = aio.server( interceptors=[ ExceptionInterceptor(), 
UDSOpenTelemetryAioServerInterceptor(), ] ) generate_pb2_grpc.add_TextGenerationServiceServicer_to_server( TextGenerationService(model, Cache(), quantize, server_urls), server ) SERVICE_NAMES = ( generate_pb2.DESCRIPTOR.services_by_name["TextGenerationService"].full_name, reflection.SERVICE_NAME, ) reflection.enable_server_reflection(SERVICE_NAMES, server) server.add_insecure_port(local_url) await server.start() logger.info("Server started at {}".format(local_url)) try: await server.wait_for_termination() except KeyboardInterrupt: logger.info("Signal received. Shutting down") await server.stop(0) asyncio.run( serve_inner( model_id, revision, sharded, quantize, speculate, dtype, trust_remote_code ) )
null
2,136
import torch import time from dataclasses import dataclass from opentelemetry import trace from transformers import ( AutoProcessor, AutoTokenizer, PreTrainedTokenizerBase, ProcessorMixin, ) from typing import Optional, Tuple, List, Type, Dict from text_generation_server.models import Model from text_generation_server.models.types import ( Batch, Tokens, Generation, GeneratedText, ) from text_generation_server.pb import generate_pb2 from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling import re IMAGES = re.compile(r"!\[[^\]]*\]\((.*?)\s*(\"(?:.*[^\"])\")?\s*\)") def split(string): parts = [] cursor = 0 for pattern in IMAGES.finditer(string): start = pattern.start() if start != cursor: parts.append(string[cursor:start]) parts.append(pattern.group(1)) cursor = pattern.end() if cursor != len(string): parts.append(string[cursor:]) return parts
null
2,137
import torch import torch.distributed from transformers import AutoTokenizer, PreTrainedTokenizerBase from typing import Optional import os from text_generation_server.models.custom_modeling.mamba_modeling import ( MambaConfig, ) from loguru import logger from text_generation_server.pb import generate_pb2 from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) from text_generation_server.models.globals import ENABLE_CUDA_GRAPHS, MEM_POOL import time from text_generation_server.models.custom_modeling.mamba_modeling import ( MambaModel, InferenceParams, ) from text_generation_server.models import Model from typing import Any, List, Optional, Tuple, Type, Dict from text_generation_server.models.types import ( Batch, Tokens, Generation, GeneratedText, ) from text_generation_server.utils.tokens import batch_top_tokens, Sampling from dataclasses import dataclass from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling import torch torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True torch.set_grad_enabled(False) class InferenceParams: """Inference parameters that are passed to the main model in order to efficienly calculate and store the context during inference.""" max_seqlen: int max_batch_size: int conv_states: torch.Tensor ssm_states: torch.Tensor seqlen_offset: int def new_inference_params( n_blocks: int, batch_size: int, d_inner: int, d_conv: int, d_state: int, seqlen_offset: int, dtype: torch.dtype, device: torch.device, ): max_seqlen = 0 conv_states = torch.zeros( ( n_blocks, batch_size, d_inner, d_conv, ), device=device, dtype=dtype, ) ssm_states = torch.zeros( ( n_blocks, batch_size, d_inner, d_state, ), device=device, dtype=dtype, ) inference_params = InferenceParams( max_seqlen=max_seqlen, max_batch_size=batch_size, seqlen_offset=seqlen_offset, conv_states=conv_states, ssm_states=ssm_states, ) return inference_params
null
2,138
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.modeling_utils import PreTrainedModel from transformers.models.gpt_neox import GPTNeoXConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.flash_attn import attention from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, FastLayerNorm, PositionRotaryEmbedding, get_linear, ) def get_linear(weight, bias, quantize): if quantize is None: linear = FastLinear(weight, bias) elif quantize == "eetq": if HAS_EETQ: linear = EETQLinear(weight, bias) else: raise ImportError( "Please install EETQ from https://github.com/NetEase-FuXi/EETQ" ) elif quantize == "bitsandbytes": warn_deprecate_bnb() linear = Linear8bitLt( weight, bias, has_fp16_weights=False, threshold=6.0, ) if bias is not None: linear.bias = nn.Parameter(bias) elif quantize == "bitsandbytes-fp4": linear = Linear4bit( weight, bias, quant_type="fp4", ) elif quantize == "bitsandbytes-nf4": linear = Linear4bit( weight, bias, quant_type="nf4", ) elif quantize == "gptq": try: qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama = weight except Exception: raise NotImplementedError( f"The passed weight is not `gptq` compatible, loader needs to be updated." ) if use_exllama: linear = ExllamaQuantLinear( qweight, qzeros, scales, g_idx, bias, bits, groupsize ) else: linear = QuantLinear( qweight, qzeros, scales, g_idx, bias, bits, groupsize, ) elif quantize == "awq": try: qweight, qzeros, scales, _, bits, groupsize, _ = weight except Exception: raise NotImplementedError( f"The passed weight is not `awq` compatible, loader needs to be updated." 
) if IS_ROCM_SYSTEM: raise NotImplementedError( "AWQ GEMM kernel can't be used on ROCm systems, please use `--quantize gptq` instead " "to use Exllama/GPTQ kernels for AWQ inference." ) if not HAS_AWQ: raise NotImplementedError( "You do not seem to have awq installed, either install it (cd server && make install-awq), or try using GPTQ `---quantize gptq` a conversion AWQ->GPTQ will happen on the fly" ) linear = WQLinear( w_bit=bits, group_size=groupsize, qweight=qweight, qzeros=qzeros, scales=scales, bias=bias is not None, ) else: raise NotImplementedError(f"Quantization `{quantize}` is not implemented yet.") return linear class TensorParallelRowLinear(SuperLayer): def __init__(self, linear, process_group): super().__init__(linear) self.process_group = process_group def load(cls, config, prefix: str, weights, bias: bool): weight = weights.get_multi_weights_row(prefix, quantize=config.quantize) if bias and weights.process_group.rank() == 0: # Rank is only on the first rank process bias = weights.get_tensor(f"{prefix}.bias") else: bias = None return cls( get_linear(weight, bias, config.quantize), process_group=weights.process_group, ) def forward(self, input: torch.Tensor, reduce: bool = True) -> torch.Tensor: out = super().forward(input) if self.process_group.size() > 1 and reduce: torch.distributed.all_reduce(out, group=self.process_group) return out def load_row(config, prefix: str, weights, bias: bool): weight = weights.get_multi_weights_row(prefix, quantize=config.quantize) if bias and weights.process_group.rank() == 0: # Rank is only on the first rank process bias = weights.get_tensor(f"{prefix}.bias") else: bias = None linear = get_linear(weight, bias, config.quantize) if config.use_parallel_residual: return linear else: return TensorParallelRowLinear(linear, process_group=weights.process_group)
null
2,139
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.modeling_utils import PreTrainedModel from transformers.models.gpt_neox import GPTNeoXConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.flash_attn import attention from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, FastLayerNorm, PositionRotaryEmbedding, get_linear, ) def get_linear(weight, bias, quantize): if quantize is None: linear = FastLinear(weight, bias) elif quantize == "eetq": if HAS_EETQ: linear = EETQLinear(weight, bias) else: raise ImportError( "Please install EETQ from https://github.com/NetEase-FuXi/EETQ" ) elif quantize == "bitsandbytes": warn_deprecate_bnb() linear = Linear8bitLt( weight, bias, has_fp16_weights=False, threshold=6.0, ) if bias is not None: linear.bias = nn.Parameter(bias) elif quantize == "bitsandbytes-fp4": linear = Linear4bit( weight, bias, quant_type="fp4", ) elif quantize == "bitsandbytes-nf4": linear = Linear4bit( weight, bias, quant_type="nf4", ) elif quantize == "gptq": try: qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama = weight except Exception: raise NotImplementedError( f"The passed weight is not `gptq` compatible, loader needs to be updated." ) if use_exllama: linear = ExllamaQuantLinear( qweight, qzeros, scales, g_idx, bias, bits, groupsize ) else: linear = QuantLinear( qweight, qzeros, scales, g_idx, bias, bits, groupsize, ) elif quantize == "awq": try: qweight, qzeros, scales, _, bits, groupsize, _ = weight except Exception: raise NotImplementedError( f"The passed weight is not `awq` compatible, loader needs to be updated." 
) if IS_ROCM_SYSTEM: raise NotImplementedError( "AWQ GEMM kernel can't be used on ROCm systems, please use `--quantize gptq` instead " "to use Exllama/GPTQ kernels for AWQ inference." ) if not HAS_AWQ: raise NotImplementedError( "You do not seem to have awq installed, either install it (cd server && make install-awq), or try using GPTQ `---quantize gptq` a conversion AWQ->GPTQ will happen on the fly" ) linear = WQLinear( w_bit=bits, group_size=groupsize, qweight=qweight, qzeros=qzeros, scales=scales, bias=bias is not None, ) else: raise NotImplementedError(f"Quantization `{quantize}` is not implemented yet.") return linear class TensorParallelColumnLinear(SuperLayer): def load_qkv(cls, config, prefix: str, weights, bias: bool): """Specific method when the QKV was joined after the fact""" weight = weights.get_weights_col_packed_qkv(prefix, quantize=config.quantize) if bias: raise NotImplementedError("packed_qkv only implemented for baichuan") else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load(cls, config, prefix: str, weights, bias: bool): return cls.load_multi(config, [prefix], weights, bias, dim=0) def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int): weight = weights.get_multi_weights_col( prefixes, quantize=config.quantize, dim=dim ) if bias: b = [weights.get_sharded(f"{p}.bias", dim=0) for p in prefixes] bias = torch.cat(b, dim=dim) else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load_qkv(config, prefix: str, weights, num_heads, head_size, hidden_size): weight = weights.get_multi_weights_col([prefix], quantize=config.quantize, dim=0) if isinstance(weight, torch.Tensor): # Only on non quantized versions weight = ( weight.view( num_heads, 3, head_size, hidden_size, ) .permute(1, 0, 2, 3) .reshape(-1, hidden_size) ) bias = weights.get_sharded(f"{prefix}.bias", dim=0) bias = bias.view(num_heads, 3, head_size).permute(1, 0, 2).reshape(-1) linear = 
get_linear(weight, bias, config.quantize) if config.use_parallel_residual: return linear else: return TensorParallelColumnLinear(linear)
null
2,140
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, FastRMSNorm, FastLayerNorm, ) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], quantize=config.quantize, dim=0, ) if config.quantize not in ["gptq", "awq"]: weight = weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" if config.use_bias: w = [ weights.get_sharded(f"{p}.bias", dim=0) for p in [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"] ] bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device) else: bias = None return TensorParallelColumnLinear( get_linear(weight, bias=bias, quantize=config.quantize) ) class TensorParallelColumnLinear(SuperLayer): def load_qkv(cls, config, prefix: str, weights, bias: bool): """Specific method when the QKV was joined after the fact""" weight = weights.get_weights_col_packed_qkv(prefix, quantize=config.quantize) if bias: raise NotImplementedError("packed_qkv only implemented for baichuan") else: 
bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load(cls, config, prefix: str, weights, bias: bool): return cls.load_multi(config, [prefix], weights, bias, dim=0) def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int): weight = weights.get_multi_weights_col( prefixes, quantize=config.quantize, dim=dim ) if bias: b = [weights.get_sharded(f"{p}.bias", dim=0) for p in prefixes] bias = torch.cat(b, dim=dim) else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=config.use_bias, )
null
2,141
import torch import torch.distributed import os from shutil import copyfile from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from tokenizers import processors from transformers.tokenization_utils_fast import PreTrainedTokenizerFast from transformers.utils import logging from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, FastRMSNorm, ) def _load_gqa(config, prefix: str, weights): assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], quantize=config.quantize, dim=0, ) if config.quantize not in ["gptq", "awq"]: weight = weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.head_dim num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" return TensorParallelColumnLinear( get_linear(weight, bias=None, quantize=config.quantize) ) class TensorParallelColumnLinear(SuperLayer): def load_qkv(cls, config, prefix: str, weights, bias: bool): """Specific method when the QKV was joined after the fact""" weight = weights.get_weights_col_packed_qkv(prefix, quantize=config.quantize) if bias: raise NotImplementedError("packed_qkv only implemented for baichuan") else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load(cls, config, prefix: str, weights, bias: bool): return 
cls.load_multi(config, [prefix], weights, bias, dim=0) def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int): weight = weights.get_multi_weights_col( prefixes, quantize=config.quantize, dim=dim ) if bias: b = [weights.get_sharded(f"{p}.bias", dim=0) for p in prefixes] bias = torch.cat(b, dim=dim) else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=False, )
null
2,142
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, FastRMSNorm, ) def _load_gqa(config, prefix: str, weights): class TensorParallelColumnLinear(SuperLayer): def load_qkv(cls, config, prefix: str, weights, bias: bool): def load(cls, config, prefix: str, weights, bias: bool): def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int): def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=False, )
null
2,143
from typing import Optional, Tuple, Union import os import torch import torch.distributed import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import ACT2FN from transformers.file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from transformers.modeling_utils import PreTrainedModel from transformers import GPTNeoXConfig from loguru import logger from text_generation_server.utils.layers import ( TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead, ) if ( torch.cuda.is_available() and not os.environ.get("DISABLE_CUSTOM_KERNELS", "False") == "True" ): try: from custom_kernels import fused_attention_cuda CUSTOM_KERNELS_ENABLED = True except ImportError: pass def make_causal_mask( input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int ) -> torch.BoolTensor: def expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor: def prepare_attn_mask( attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int, ) -> torch.BoolTensor: # create causal mask # [batch_size, seq_length] -> [batch_size, tgt_length, src_length] combined_attention_mask = None device = attention_mask.device _, src_length = input_shape if src_length > 1: combined_attention_mask = make_causal_mask( input_shape, device=device, past_key_values_length=past_key_values_length ) # [batch_size, seq_length] -> [batch_size, tgt_length, src_length] expanded_attn_mask = expand_mask(attention_mask, tgt_length=src_length) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask ) return 
combined_attention_mask
null
2,144
from typing import Optional, Tuple, Union import os import torch import torch.distributed import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import ACT2FN from transformers.file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from transformers.modeling_utils import PreTrainedModel from transformers import GPTNeoXConfig from loguru import logger from text_generation_server.utils.layers import ( TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead, ) if ( torch.cuda.is_available() and not os.environ.get("DISABLE_CUSTOM_KERNELS", "False") == "True" ): try: from custom_kernels import fused_attention_cuda CUSTOM_KERNELS_ENABLED = True except ImportError: pass def rotary_forward(q, k, cos, sin, position_ids): cos = cos[position_ids].unsqueeze(1) sin = sin[position_ids].unsqueeze(1) chunk_size = q.shape[-1] // 2 q1, q2 = q.split(chunk_size, -1) q_rotated = torch.cat((-q2, q1), dim=-1) k1, k2 = k.split(chunk_size, -1) k_rotated = torch.cat((-k2, k1), dim=-1) q_embed = (q * cos) + (q_rotated * sin) k_embed = (k * cos) + (k_rotated * sin) return q_embed, k_embed
null
2,145
import torch import torch.distributed from torch import nn from transformers.modeling_utils import PreTrainedModel from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.flash_attn import attention from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, FastLayerNorm, PositionRotaryEmbedding, get_linear, ) def get_linear(weight, bias, quantize): if quantize is None: linear = FastLinear(weight, bias) elif quantize == "eetq": if HAS_EETQ: linear = EETQLinear(weight, bias) else: raise ImportError( "Please install EETQ from https://github.com/NetEase-FuXi/EETQ" ) elif quantize == "bitsandbytes": warn_deprecate_bnb() linear = Linear8bitLt( weight, bias, has_fp16_weights=False, threshold=6.0, ) if bias is not None: linear.bias = nn.Parameter(bias) elif quantize == "bitsandbytes-fp4": linear = Linear4bit( weight, bias, quant_type="fp4", ) elif quantize == "bitsandbytes-nf4": linear = Linear4bit( weight, bias, quant_type="nf4", ) elif quantize == "gptq": try: qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama = weight except Exception: raise NotImplementedError( f"The passed weight is not `gptq` compatible, loader needs to be updated." ) if use_exllama: linear = ExllamaQuantLinear( qweight, qzeros, scales, g_idx, bias, bits, groupsize ) else: linear = QuantLinear( qweight, qzeros, scales, g_idx, bias, bits, groupsize, ) elif quantize == "awq": try: qweight, qzeros, scales, _, bits, groupsize, _ = weight except Exception: raise NotImplementedError( f"The passed weight is not `awq` compatible, loader needs to be updated." ) if IS_ROCM_SYSTEM: raise NotImplementedError( "AWQ GEMM kernel can't be used on ROCm systems, please use `--quantize gptq` instead " "to use Exllama/GPTQ kernels for AWQ inference." 
) if not HAS_AWQ: raise NotImplementedError( "You do not seem to have awq installed, either install it (cd server && make install-awq), or try using GPTQ `---quantize gptq` a conversion AWQ->GPTQ will happen on the fly" ) linear = WQLinear( w_bit=bits, group_size=groupsize, qweight=qweight, qzeros=qzeros, scales=scales, bias=bias is not None, ) else: raise NotImplementedError(f"Quantization `{quantize}` is not implemented yet.") return linear class TensorParallelRowLinear(SuperLayer): def __init__(self, linear, process_group): super().__init__(linear) self.process_group = process_group def load(cls, config, prefix: str, weights, bias: bool): weight = weights.get_multi_weights_row(prefix, quantize=config.quantize) if bias and weights.process_group.rank() == 0: # Rank is only on the first rank process bias = weights.get_tensor(f"{prefix}.bias") else: bias = None return cls( get_linear(weight, bias, config.quantize), process_group=weights.process_group, ) def forward(self, input: torch.Tensor, reduce: bool = True) -> torch.Tensor: out = super().forward(input) if self.process_group.size() > 1 and reduce: torch.distributed.all_reduce(out, group=self.process_group) return out def load_row(config, prefix: str, weights, bias: bool): weight = weights.get_multi_weights_row(prefix, quantize=config.quantize) if bias and weights.process_group.rank() == 0: # Rank is only on the first rank process bias = weights.get_tensor(f"{prefix}.bias") else: bias = None linear = get_linear(weight, bias, config.quantize) if config.parallel_attn: return linear else: return TensorParallelRowLinear(linear, process_group=weights.process_group)
null
2,146
import torch import torch.distributed import numpy as np from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from loguru import logger from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( FastLinear, FastRMSNorm, TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, ) def promote_scalar(x: torch.Tensor) -> torch.Tensor: return x.view(1) if len(x.size()) == 0 else x
null
2,147
import torch import torch.distributed import numpy as np from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from loguru import logger from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( FastLinear, FastRMSNorm, TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, ) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], quantize=config.quantize, dim=0, ) if config.quantize not in ["gptq", "awq"]: weight = weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" return TensorParallelColumnLinear( get_linear(weight, bias=None, quantize=config.quantize) ) class TensorParallelColumnLinear(SuperLayer): def load_qkv(cls, config, prefix: str, weights, bias: bool): """Specific method when the QKV was joined after the fact""" weight = weights.get_weights_col_packed_qkv(prefix, quantize=config.quantize) if bias: raise NotImplementedError("packed_qkv only implemented for baichuan") else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load(cls, config, prefix: str, weights, bias: bool): return cls.load_multi(config, [prefix], weights, 
bias, dim=0) def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int): weight = weights.get_multi_weights_col( prefixes, quantize=config.quantize, dim=dim ) if bias: b = [weights.get_sharded(f"{p}.bias", dim=0) for p in prefixes] bias = torch.cat(b, dim=dim) else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=False, )
null
2,148
import torch import torch.distributed import numpy as np from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from loguru import logger from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( FastLinear, FastRMSNorm, TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, ) def _load_experts(config, prefix, mat, weights): if config.quantize is not None: raise NotImplementedError("Mixtral does not support weight quantization yet.") assert mat in ["w1", "w2", "w3"] world_size = weights.process_group.size() rank = weights.process_group.rank() assert ( config.intermediate_size % world_size == 0 ), f"The chosen size {config.intermediate_size} is not compatible with sharding on {world_size} shards" block_size = config.intermediate_size // world_size start = rank * block_size stop = (rank + 1) * block_size tensor = torch.empty( (config.num_local_experts * block_size, config.hidden_size), dtype=weights.dtype, device=weights.device, ) for i in range(config.num_local_experts): slice_ = weights._get_slice(f"{prefix}.{i}.{mat}.weight") if mat == "w2": expert_slice = slice_[:, start:stop].t().contiguous() else: expert_slice = slice_[start:stop] tensor[i * block_size : (i + 1) * block_size] = expert_slice.to( dtype=weights.dtype ).to(device=weights.device) return tensor
null
2,149
import torch import torch.distributed import numpy as np from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from loguru import logger from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( FastLinear, FastRMSNorm, TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, ) def select_experts(gate_logits: torch.Tensor, top_k: int): # all_probs: (sequence_length, n_experts) and upcast for softmax all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) # weights, selected_experts: (sequence_length, top-k) weights, selected_experts = torch.topk(all_probs, top_k, dim=-1) weights /= weights.sum(dim=-1, keepdim=True) weights = weights.view(-1) selected_experts = selected_experts.view(-1) return selected_experts, weights
null
2,150
import torch import torch.distributed import numpy as np from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from loguru import logger from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( FastLinear, FastRMSNorm, TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, ) def round_up(x: torch.Tensor, value: int): return torch.div(x + (value - 1), value, rounding_mode="trunc") * value
null
2,151
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, SpeculativeHead, TensorParallelEmbedding, FastLayerNorm, get_linear, ) def _load_multi_mqa_gptq( config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size ): if any("c_attn" in k for k in weights.routing.keys()) and not config.transpose: world_size = weights.process_group.size() rank = weights.process_group.rank() slice_ = weights._get_slice(f"{prefix}.c_attn.qweight") shape = slice_.get_shape() block_size = (shape[1] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[1] - 2 * head_size) % world_size == 0 q_tensor = slice_[:, start:stop] kv_tensor = slice_[:, -2 * head_size :] qweight = torch.cat([q_tensor, kv_tensor], dim=1) qweight = qweight.to(device=weights.device) slice_ = weights._get_slice(f"{prefix}.c_attn.scales") shape = slice_.get_shape() block_size = (shape[1] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[1] - 2 * head_size) % world_size == 0 q_tensor = slice_[:, start:stop] kv_tensor = slice_[:, -2 * head_size :] scales = torch.cat([q_tensor, kv_tensor], dim=1) scales = scales.to(device=weights.device) slice_ = weights._get_slice(f"{prefix}.c_attn.qzeros") shape = slice_.get_shape() block_size = (shape[1] - (2 * head_size) * 4 // 32) // world_size start = rank * block_size stop = (rank + 1) * block_size assert 2 * head_size % (32 // 4) == 0 q_tensor = slice_[:, start:stop] kv_tensor = slice_[:, -2 * head_size * 4 // 32 :] qzeros = torch.cat([q_tensor, kv_tensor], dim=1) qzeros = qzeros.to(device=weights.device) ( bits, groupsize, _, quant_method, ) = weights._get_gptq_params() if quant_method == "gptq": g_idx = 
weights.get_tensor(f"{prefix}.c_attn.g_idx") g_idx = g_idx.to(device=weights.device) elif quant_method == "awq": g_idx = None from text_generation_server.utils.awq.conversion_utils import ( fast_awq_to_gptq, ) qweight, qzeros = fast_awq_to_gptq(qweight, qzeros) from text_generation_server.utils.layers import HAS_EXLLAMA use_exllama = HAS_EXLLAMA weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama) if bias: slice_ = weights._get_slice(f"{prefix}.c_attn.bias") shape = slice_.get_shape() block_size = (shape[0] - 2 * head_size) // world_size assert (shape[0] - 2 * head_size) % world_size == 0 q_tensor = slice_[start:stop] start = rank * block_size stop = (rank + 1) * block_size q_tensor = slice_[start:stop] kv_tensor = slice_[-2 * head_size :] bias = torch.cat([q_tensor, kv_tensor], dim=0) bias = bias.to(device=weights.device) return TensorParallelColumnLinear(get_linear(weight, bias, config.quantize)) else: raise NotImplementedError("Gptq loading with santacoder is not implemented") def _load_multi_mqa( config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size ): if any("c_attn" in k for k in weights.routing.keys()): slice_ = weights._get_slice(f"{prefix}.c_attn.weight") shape = slice_.get_shape() world_size = weights.process_group.size() rank = weights.process_group.rank() if config.transpose: block_size = (shape[1] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[1] - 2 * head_size) % world_size == 0 q_tensor = slice_[:, start:stop] kv_tensor = slice_[:, -2 * head_size :] weight = torch.cat([q_tensor, kv_tensor], dim=1).T else: block_size = (shape[0] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[0] - 2 * head_size) % world_size == 0 q_tensor = slice_[start:stop] kv_tensor = slice_[-2 * head_size :] weight = torch.cat([q_tensor, kv_tensor], dim=0) if bias: slice_ = weights._get_slice(f"{prefix}.c_attn.bias") shape = 
slice_.get_shape() block_size = (shape[0] - 2 * head_size) // world_size assert (shape[0] - 2 * head_size) % world_size == 0 start = rank * block_size stop = (rank + 1) * block_size q_tensor = slice_[start:stop] kv_tensor = slice_[-2 * head_size :] bias = torch.cat([q_tensor, kv_tensor], dim=0) else: if config.transpose: w = [ weights.get_sharded(f"{prefix}.q_attn.weight", dim=1).T, weights.get_tensor(f"{prefix}.kv_attn.weight").T, ] weight = torch.cat(w, dim=0) else: w = [ weights.get_sharded(f"{prefix}.q_attn.weight", dim=0), weights.get_tensor(f"{prefix}.kv_attn.weight"), ] weight = torch.cat(w, dim=1) if bias: b = [ weights.get_sharded(f"{prefix}.q_attn.bias", dim=0), weights.get_tensor(f"{prefix}.kv_attn.bias"), ] bias = torch.cat(b, dim=0) else: bias = None weight = weight.to(dtype=weights.dtype).to(device=weights.device) assert list(weight.shape) == [ (num_heads + 2) * head_size, hidden_size, ], f"{weight.shape} != {[(num_heads + 2) * head_size, hidden_size]}" if bias is not None: bias = bias.to(dtype=weights.dtype).to(device=weights.device) assert list(bias.shape) == [ (num_heads + 2) * head_size ], f"{weight.shape} != {[(num_heads + 2) * head_size]}" return TensorParallelColumnLinear(get_linear(weight, bias, config.quantize)) def load_multi_mqa( config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size ): if config.quantize == "gptq": return _load_multi_mqa_gptq( config, prefix, weights, bias, head_size, num_heads, hidden_size ) else: return _load_multi_mqa( config, prefix, weights, bias, head_size, num_heads, hidden_size )
null
2,152
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, SpeculativeHead, TensorParallelEmbedding, FastLayerNorm, get_linear, ) def get_linear(weight, bias, quantize): if quantize is None: linear = FastLinear(weight, bias) elif quantize == "eetq": if HAS_EETQ: linear = EETQLinear(weight, bias) else: raise ImportError( "Please install EETQ from https://github.com/NetEase-FuXi/EETQ" ) elif quantize == "bitsandbytes": warn_deprecate_bnb() linear = Linear8bitLt( weight, bias, has_fp16_weights=False, threshold=6.0, ) if bias is not None: linear.bias = nn.Parameter(bias) elif quantize == "bitsandbytes-fp4": linear = Linear4bit( weight, bias, quant_type="fp4", ) elif quantize == "bitsandbytes-nf4": linear = Linear4bit( weight, bias, quant_type="nf4", ) elif quantize == "gptq": try: qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama = weight except Exception: raise NotImplementedError( f"The passed weight is not `gptq` compatible, loader needs to be updated." ) if use_exllama: linear = ExllamaQuantLinear( qweight, qzeros, scales, g_idx, bias, bits, groupsize ) else: linear = QuantLinear( qweight, qzeros, scales, g_idx, bias, bits, groupsize, ) elif quantize == "awq": try: qweight, qzeros, scales, _, bits, groupsize, _ = weight except Exception: raise NotImplementedError( f"The passed weight is not `awq` compatible, loader needs to be updated." ) if IS_ROCM_SYSTEM: raise NotImplementedError( "AWQ GEMM kernel can't be used on ROCm systems, please use `--quantize gptq` instead " "to use Exllama/GPTQ kernels for AWQ inference." 
) if not HAS_AWQ: raise NotImplementedError( "You do not seem to have awq installed, either install it (cd server && make install-awq), or try using GPTQ `---quantize gptq` a conversion AWQ->GPTQ will happen on the fly" ) linear = WQLinear( w_bit=bits, group_size=groupsize, qweight=qweight, qzeros=qzeros, scales=scales, bias=bias is not None, ) else: raise NotImplementedError(f"Quantization `{quantize}` is not implemented yet.") return linear class TensorParallelColumnLinear(SuperLayer): def load_qkv(cls, config, prefix: str, weights, bias: bool): """Specific method when the QKV was joined after the fact""" weight = weights.get_weights_col_packed_qkv(prefix, quantize=config.quantize) if bias: raise NotImplementedError("packed_qkv only implemented for baichuan") else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load(cls, config, prefix: str, weights, bias: bool): return cls.load_multi(config, [prefix], weights, bias, dim=0) def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int): weight = weights.get_multi_weights_col( prefixes, quantize=config.quantize, dim=dim ) if bias: b = [weights.get_sharded(f"{p}.bias", dim=0) for p in prefixes] bias = torch.cat(b, dim=dim) else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load_col(config, prefix: str, weights, bias: bool): if config.transpose: weight = weights.get_sharded(f"{prefix}.weight", dim=1).T else: weight = weights.get_multi_weights_col( [prefix], quantize=config.quantize, dim=0 ) if bias: bias = weights.get_sharded(f"{prefix}.bias", dim=0) else: bias = None return TensorParallelColumnLinear(get_linear(weight, bias, config.quantize))
null
2,153
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, SpeculativeHead, TensorParallelEmbedding, FastLayerNorm, get_linear, ) def get_linear(weight, bias, quantize): if quantize is None: linear = FastLinear(weight, bias) elif quantize == "eetq": if HAS_EETQ: linear = EETQLinear(weight, bias) else: raise ImportError( "Please install EETQ from https://github.com/NetEase-FuXi/EETQ" ) elif quantize == "bitsandbytes": warn_deprecate_bnb() linear = Linear8bitLt( weight, bias, has_fp16_weights=False, threshold=6.0, ) if bias is not None: linear.bias = nn.Parameter(bias) elif quantize == "bitsandbytes-fp4": linear = Linear4bit( weight, bias, quant_type="fp4", ) elif quantize == "bitsandbytes-nf4": linear = Linear4bit( weight, bias, quant_type="nf4", ) elif quantize == "gptq": try: qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama = weight except Exception: raise NotImplementedError( f"The passed weight is not `gptq` compatible, loader needs to be updated." ) if use_exllama: linear = ExllamaQuantLinear( qweight, qzeros, scales, g_idx, bias, bits, groupsize ) else: linear = QuantLinear( qweight, qzeros, scales, g_idx, bias, bits, groupsize, ) elif quantize == "awq": try: qweight, qzeros, scales, _, bits, groupsize, _ = weight except Exception: raise NotImplementedError( f"The passed weight is not `awq` compatible, loader needs to be updated." ) if IS_ROCM_SYSTEM: raise NotImplementedError( "AWQ GEMM kernel can't be used on ROCm systems, please use `--quantize gptq` instead " "to use Exllama/GPTQ kernels for AWQ inference." 
) if not HAS_AWQ: raise NotImplementedError( "You do not seem to have awq installed, either install it (cd server && make install-awq), or try using GPTQ `---quantize gptq` a conversion AWQ->GPTQ will happen on the fly" ) linear = WQLinear( w_bit=bits, group_size=groupsize, qweight=qweight, qzeros=qzeros, scales=scales, bias=bias is not None, ) else: raise NotImplementedError(f"Quantization `{quantize}` is not implemented yet.") return linear class TensorParallelRowLinear(SuperLayer): def __init__(self, linear, process_group): super().__init__(linear) self.process_group = process_group def load(cls, config, prefix: str, weights, bias: bool): weight = weights.get_multi_weights_row(prefix, quantize=config.quantize) if bias and weights.process_group.rank() == 0: # Rank is only on the first rank process bias = weights.get_tensor(f"{prefix}.bias") else: bias = None return cls( get_linear(weight, bias, config.quantize), process_group=weights.process_group, ) def forward(self, input: torch.Tensor, reduce: bool = True) -> torch.Tensor: out = super().forward(input) if self.process_group.size() > 1 and reduce: torch.distributed.all_reduce(out, group=self.process_group) return out def load_row(config, prefix: str, weights, bias: bool): if config.transpose: weight = weights.get_sharded(f"{prefix}.weight", dim=0).T else: weight = weights.get_multi_weights_row(prefix, quantize=config.quantize) if bias and weights.process_group.rank() == 0: # Rank is only on the first rank process bias = weights.get_tensor(f"{prefix}.bias") else: bias = None return TensorParallelRowLinear( get_linear(weight, bias, config.quantize), process_group=weights.process_group )
null
2,154
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, FastRMSNorm, ) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], quantize=config.quantize, dim=0, ) if config.quantize not in ["gptq", "awq"]: weight = weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" w = [ weights.get_sharded(f"{p}.bias", dim=0) for p in [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"] ] bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device) return TensorParallelColumnLinear( get_linear(weight, bias=bias, quantize=config.quantize) ) class TensorParallelColumnLinear(SuperLayer): def load_qkv(cls, config, prefix: str, weights, bias: bool): """Specific method when the QKV was joined after the fact""" weight = weights.get_weights_col_packed_qkv(prefix, quantize=config.quantize) if bias: raise NotImplementedError("packed_qkv only implemented for baichuan") else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load(cls, config, prefix: 
str, weights, bias: bool): return cls.load_multi(config, [prefix], weights, bias, dim=0) def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int): weight = weights.get_multi_weights_col( prefixes, quantize=config.quantize, dim=dim ) if bias: b = [weights.get_sharded(f"{p}.bias", dim=0) for p in prefixes] bias = torch.cat(b, dim=dim) else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=True, )
null
2,155
from typing import Callable, List, Optional, Union from urllib.parse import urlparse from transformers.feature_extraction_utils import BatchFeature from transformers.processing_utils import ProcessorMixin from transformers.tokenization_utils_base import ( BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy, ) from transformers.utils import TensorType, is_torch_available from text_generation_server.models.custom_modeling.idefics_image_processing import ( IdeficsImageProcessor, ) def incremental_to_binary_attention_mask(incremental_mask, num_classes=-1): # This function converts: [-1, 0, 1] => [[0, 0], [1, 0], [0, 1]] # If any of images index are more than num_classes, set them to -1. # Words after the max number of images allowed have been seen don't attend on anything if num_classes != -1: incremental_mask[incremental_mask >= num_classes] = -1 negatives = incremental_mask == -1 incremental_mask[negatives] = 0 attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes) attn_mask[negatives, :] = 0 return attn_mask
null
2,156
from typing import Callable, List, Optional, Union from urllib.parse import urlparse from transformers.feature_extraction_utils import BatchFeature from transformers.processing_utils import ProcessorMixin from transformers.tokenization_utils_base import ( BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy, ) from transformers.utils import TensorType, is_torch_available from text_generation_server.models.custom_modeling.idefics_image_processing import ( IdeficsImageProcessor, ) IMAGE_TOKEN = "<image>" def image_attention_mask_for_packed_input_ids(input_ids, tokenizer): image_attention_mask = torch.full_like(input_ids, fill_value=-1) next_image_attention_mask = torch.full_like(input_ids, fill_value=-1) image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN) eod_token_id = tokenizer.eos_token_id for batch_idx in range(input_ids.size(0)): count = -1 seen_eod = False for idx, token_id in enumerate(input_ids[batch_idx]): if token_id == image_token_id: count += 1 image_attention_mask[batch_idx][idx] = count seen_eod = False else: image_attention_mask[batch_idx][idx] = count if seen_eod: image_attention_mask[batch_idx][idx] = -1 if token_id == eod_token_id: seen_eod = True for batch_idx in range(input_ids.size(0)): count = -1 seen_eod = False for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1): token_id = input_ids[batch_idx][idx] if token_id == image_token_id: count += 1 next_image_attention_mask[batch_idx][idx] = count seen_eod = False else: next_image_attention_mask[batch_idx][idx] = count if token_id == eod_token_id: seen_eod = True if seen_eod: next_image_attention_mask[batch_idx][idx] = -1 non_negative_indices = next_image_attention_mask[batch_idx] != -1 next_image_attention_mask[batch_idx][non_negative_indices] -= count next_image_attention_mask[batch_idx][non_negative_indices] *= -1 return image_attention_mask, next_image_attention_mask
null
2,157
from typing import Callable, List, Optional, Union from urllib.parse import urlparse from transformers.feature_extraction_utils import BatchFeature from transformers.processing_utils import ProcessorMixin from transformers.tokenization_utils_base import ( BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy, ) from transformers.utils import TensorType, is_torch_available from text_generation_server.models.custom_modeling.idefics_image_processing import ( IdeficsImageProcessor, ) def is_url(string): """Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately invalidated the url""" if " " in string: return False result = urlparse(string) return all([result.scheme, result.netloc]) The provided code snippet includes necessary dependencies for implementing the `is_image` function. Write a Python function `def is_image(string)` to solve the following problem: Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately invalidated the url Here is the function: def is_image(string): """Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately invalidated the url""" return is_url(string) or string.startswith("data:")
Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately invalidated the url
2,158
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, FastRMSNorm, ) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], quantize=config.quantize, dim=0, ) if config.quantize not in ["gptq", "awq"]: weight = weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" return TensorParallelColumnLinear( get_linear(weight, bias=None, quantize=config.quantize) ) class TensorParallelColumnLinear(SuperLayer): def load_qkv(cls, config, prefix: str, weights, bias: bool): """Specific method when the QKV was joined after the fact""" weight = weights.get_weights_col_packed_qkv(prefix, quantize=config.quantize) if bias: raise NotImplementedError("packed_qkv only implemented for baichuan") else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load(cls, config, prefix: str, weights, bias: bool): return cls.load_multi(config, [prefix], weights, bias, dim=0) def load_multi(cls, config, prefixes: 
List[str], weights, bias: bool, dim: int): weight = weights.get_multi_weights_col( prefixes, quantize=config.quantize, dim=dim ) if bias: b = [weights.get_sharded(f"{p}.bias", dim=0) for p in prefixes] bias = torch.cat(b, dim=dim) else: bias = None linear = get_linear(weight, bias, config.quantize) return cls(linear) def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: if config.model_type == "baichuan": return TensorParallelColumnLinear.load_qkv( config, prefix=f"{prefix}.W_pack", weights=weights, bias=False, ) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=False, )
null