code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCamelCase_ ( _UpperCamelCase ) -> Dict:
"""simple docstring"""
snake_case_ , snake_case_ : Optional[Any] = image.size
snake_case_ , snake_case_ : int = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
snake_case_ : Union[str, Any] = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
snake_case_ : int = np.array(_UpperCamelCase ).astype(np.floataa ) / 255.0
snake_case_ : Optional[int] = image[None].transpose(0 , 3 , 1 , 2 )
snake_case_ : Tuple = torch.from_numpy(_UpperCamelCase )
return 2.0 * image - 1.0
class __lowerCAmelCase ( _a ):
def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ , ) -> Any:
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ )
@torch.no_grad()
def __call__(self , __magic_name__ = None , __magic_name__ = 1 , __magic_name__ = 100 , __magic_name__ = 0.0 , __magic_name__ = None , __magic_name__ = "pil" , __magic_name__ = True , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
if isinstance(__magic_name__ , PIL.Image.Image ):
snake_case_ : Tuple = 1
elif isinstance(__magic_name__ , torch.Tensor ):
snake_case_ : List[str] = image.shape[0]
else:
raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__magic_name__ )}''' )
if isinstance(__magic_name__ , PIL.Image.Image ):
snake_case_ : Optional[int] = preprocess(__magic_name__ )
snake_case_ , snake_case_ : Optional[int] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
snake_case_ : int = (batch_size, self.unet.config.in_channels // 2, height, width)
snake_case_ : Optional[Any] = next(self.unet.parameters() ).dtype
snake_case_ : Tuple = randn_tensor(__magic_name__ , generator=__magic_name__ , device=self.device , dtype=__magic_name__ )
snake_case_ : str = image.to(device=self.device , dtype=__magic_name__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__magic_name__ , device=self.device )
snake_case_ : int = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
snake_case_ : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case_ : List[str] = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case_ : Optional[Any] = {}
if accepts_eta:
snake_case_ : int = eta
for t in self.progress_bar(__magic_name__ ):
# concat latents and low resolution image in the channel dimension.
snake_case_ : str = torch.cat([latents, image] , dim=1 )
snake_case_ : Tuple = self.scheduler.scale_model_input(__magic_name__ , __magic_name__ )
# predict the noise residual
snake_case_ : Optional[int] = self.unet(__magic_name__ , __magic_name__ ).sample
# compute the previous noisy sample x_t -> x_t-1
snake_case_ : Dict = self.scheduler.step(__magic_name__ , __magic_name__ , __magic_name__ , **__magic_name__ ).prev_sample
# decode the image latents with the VQVAE
snake_case_ : Union[str, Any] = self.vqvae.decode(__magic_name__ ).sample
snake_case_ : str = torch.clamp(__magic_name__ , -1.0 , 1.0 )
snake_case_ : Union[str, Any] = image / 2 + 0.5
snake_case_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case_ : Optional[Any] = self.numpy_to_pil(__magic_name__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__magic_name__ )
| 60 |
"""simple docstring"""
from collections import defaultdict
def a_ ( lowercase__ :int ):
__lowerCamelCase = 1
__lowerCamelCase = True
for v in tree[start]:
if v not in visited:
ret += dfs(lowercase__ )
if ret % 2 == 0:
cuts.append(lowercase__ )
return ret
def a_ ( ):
dfs(1 )
if __name__ == "__main__":
__magic_name__ , __magic_name__ : Tuple = 1_0, 9
__magic_name__ : Tuple = defaultdict(list)
__magic_name__ : dict[int, bool] = {}
__magic_name__ : list[int] = []
__magic_name__ : List[str] = 0
__magic_name__ : Tuple = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 281 | 0 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> str:
lowerCAmelCase_ : List[str] = 1.5
lowerCAmelCase_ : List[Any] = int(factor * num_class_images )
lowerCAmelCase_ : Union[str, Any] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 )
os.makedirs(f"""{class_data_dir}/images""" , exist_ok=lowerCAmelCase_ )
if len(list(Path(f"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowerCAmelCase_ : Any = client.query(text=lowerCAmelCase_ )
if len(lowerCAmelCase_ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowerCAmelCase_ : str = int(factor * num_images )
lowerCAmelCase_ : Tuple = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=lowerCAmelCase_ , aesthetic_weight=0.1 , )
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : Tuple = tqdm(desc='''downloading real regularization images''' , total=lowerCAmelCase_ )
with open(f"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(f"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
f"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowerCAmelCase_ : Optional[Any] = class_images[count]
count += 1
try:
lowerCAmelCase_ : Optional[Any] = requests.get(images['''url'''] )
if img.status_code == 200:
lowerCAmelCase_ : Union[str, Any] = Image.open(BytesIO(img.content ) )
with open(f"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(f"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCAmelCase ( )-> Union[str, Any]:
lowerCAmelCase_ : List[Any] = argparse.ArgumentParser('''''' , add_help=lowerCAmelCase_ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=lowerCAmelCase_ , type=lowerCAmelCase_ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=lowerCAmelCase_ , type=lowerCAmelCase_ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=200 , type=lowerCAmelCase_ )
return parser.parse_args()
if __name__ == "__main__":
_UpperCAmelCase : List[str] =parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images) | 710 |
def lowerCAmelCase ( lowerCAmelCase_ = 1_000_000 )-> int:
lowerCAmelCase_ : Dict = 1
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : Optional[Any] = {1: 1}
for inputa in range(2 , lowerCAmelCase_ ):
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : Dict = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
lowerCAmelCase_ : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
lowerCAmelCase_ : Tuple = counter
if counter > pre_counter:
lowerCAmelCase_ : Optional[int] = inputa
lowerCAmelCase_ : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip()))) | 619 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A = None , ):
super().__init__()
self.register_modules(transformer=__A , vae=__A , scheduler=__A )
# create a imagenet -> id dictionary for easier use
__a = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(""",""" ):
__a = int(__A )
__a = dict(sorted(self.labels.items() ) )
def snake_case_ ( self , __A ):
if not isinstance(__A , __A ):
__a = list(__A )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , __A , __A = 4.0 , __A = None , __A = 50 , __A = "pil" , __A = True , ):
__a = len(__A )
__a = self.transformer.config.sample_size
__a = self.transformer.config.in_channels
__a = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__A , device=self.device , dtype=self.transformer.dtype , )
__a = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__a = torch.tensor(__A , device=self.device ).reshape(-1 )
__a = torch.tensor([1000] * batch_size , device=self.device )
__a = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(__A )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__a = latent_model_input[: len(__A ) // 2]
__a = torch.cat([half, half] , dim=0 )
__a = self.scheduler.scale_model_input(__A , __A )
__a = t
if not torch.is_tensor(__A ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__a = latent_model_input.device.type == """mps"""
if isinstance(__A , __A ):
__a = torch.floataa if is_mps else torch.floataa
else:
__a = torch.intaa if is_mps else torch.intaa
__a = torch.tensor([timesteps] , dtype=__A , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__a = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__a = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__a = self.transformer(
__A , timestep=__A , class_labels=__A ).sample
# perform guidance
if guidance_scale > 1:
__a , __a = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__a , __a = torch.split(__A , len(__A ) // 2 , dim=0 )
__a = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__a = torch.cat([half_eps, half_eps] , dim=0 )
__a = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__a , __a = torch.split(__A , __A , dim=1 )
else:
__a = noise_pred
# compute previous image: x_t -> x_t-1
__a = self.scheduler.step(__A , __A , __A ).prev_sample
if guidance_scale > 1:
__a , __a = latent_model_input.chunk(2 , dim=0 )
else:
__a = latent_model_input
__a = 1 / self.vae.config.scaling_factor * latents
__a = self.vae.decode(__A ).sample
__a = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__a = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__a = self.numpy_to_pil(__A )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__A )
| 99 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( snake_case__ ):
A_ : List[str] = DPTConfig()
if "large" in checkpoint_url:
A_ : List[Any] = 1_024
A_ : int = 4_096
A_ : List[str] = 24
A_ : List[str] = 16
A_ : int = [5, 11, 17, 23]
A_ : str = [256, 512, 1_024, 1_024]
A_ : str = (1, 384, 384)
if "ade" in checkpoint_url:
A_ : Tuple = True
A_ : Optional[Any] = 150
A_ : str = """huggingface/label-files"""
A_ : List[Any] = """ade20k-id2label.json"""
A_ : List[str] = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type="""dataset""" ) ) , """r""" ) )
A_ : Dict = {int(snake_case__ ): v for k, v in idalabel.items()}
A_ : int = idalabel
A_ : Dict = {v: k for k, v in idalabel.items()}
A_ : Optional[Any] = [1, 150, 480, 480]
return config, expected_shape
def __UpperCamelCase ( snake_case__ ):
A_ : List[str] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def __UpperCamelCase ( snake_case__ ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A_ : Any = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
A_ : int = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
A_ : List[Any] = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
A_ : Optional[int] = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
A_ : str = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
A_ : Tuple = name.replace("""proj""" , """projection""" )
if "blocks" in name:
A_ : Dict = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
A_ : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
A_ : Any = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
A_ : Tuple = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
A_ : Optional[int] = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
A_ : Dict = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
A_ : int = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
A_ : Any = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
A_ : List[str] = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
A_ : int = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
A_ : Optional[Any] = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
A_ : Dict = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A_ : Tuple = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
A_ : int = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
A_ : str = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
A_ : Optional[int] = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
A_ : Any = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
A_ : Dict = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A_ : List[str] = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
A_ : List[str] = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
A_ : int = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
A_ : Union[str, Any] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A_ : Union[str, Any] = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
A_ : Optional[Any] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
A_ : List[str] = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
A_ : Optional[int] = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
A_ : Any = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
A_ : int = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
A_ : Dict = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
A_ : List[str] = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
A_ : str = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
A_ : Any = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
A_ : Optional[Any] = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
A_ : List[Any] = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def __UpperCamelCase ( snake_case__ , snake_case__ ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : Union[str, Any] = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
A_ : Any = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A_ : Tuple = in_proj_weight[: config.hidden_size, :]
A_ : str = in_proj_bias[: config.hidden_size]
A_ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : int = in_proj_weight[
-config.hidden_size :, :
]
A_ : List[Any] = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( ):
A_ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Optional[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
A_ , A_ : int = get_dpt_config(snake_case__ )
# load original state_dict from URL
A_ : Dict = torch.hub.load_state_dict_from_url(snake_case__ , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
A_ : str = state_dict.pop(snake_case__ )
A_ : str = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
A_ : Union[str, Any] = DPTForSemanticSegmentation(snake_case__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
A_ : str = 480 if """ade""" in checkpoint_url else 384
A_ : List[str] = DPTImageProcessor(size=snake_case__ )
A_ : Dict = prepare_img()
A_ : List[Any] = image_processor(snake_case__ , return_tensors="""pt""" )
# forward pass
A_ : Tuple = model(**snake_case__ ).logits if """ade""" in checkpoint_url else model(**snake_case__ ).predicted_depth
# Assert logits
A_ : List[str] = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
A_ : str = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(snake_case__ )
assert (
torch.allclose(outputs[0, 0, :3, :3] , snake_case__ , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , snake_case__ )
)
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
print("""Pushing model to hub...""" )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=snake_case__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=snake_case__ , )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
_lowerCAmelCase = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 180 | 0 |
'''simple docstring'''
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ , lowercase__ : str = [], []
while len(UpperCAmelCase ) > 1:
lowercase__ , lowercase__ : Optional[Any] = min(UpperCAmelCase ), max(UpperCAmelCase )
start.append(UpperCAmelCase )
end.append(UpperCAmelCase )
collection.remove(UpperCAmelCase )
collection.remove(UpperCAmelCase )
end.reverse()
return start + collection + end
if __name__ == "__main__":
__a: str = input("""Enter numbers separated by a comma:\n""").strip()
__a: Optional[Any] = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 428 | '''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = MODEL_FOR_CAUSAL_LM_MAPPING
SCREAMING_SNAKE_CASE = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : Dict = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowercase__ : Tuple = text_generator('''This is a test''' , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowercase__ : List[Any] = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
__lowerCAmelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowercase__ : List[str] = text_generator('''This is a test''' , do_sample=__lowerCAmelCase , num_return_sequences=2 , return_tensors=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{'''generated_token_ids''': ANY(__lowerCAmelCase )},
{'''generated_token_ids''': ANY(__lowerCAmelCase )},
] , )
lowercase__ : Any = text_generator.model.config.eos_token_id
lowercase__ : List[Any] = '''<pad>'''
lowercase__ : List[str] = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=__lowerCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowerCAmelCase , )
self.assertEqual(
__lowerCAmelCase , [
[
{'''generated_token_ids''': ANY(__lowerCAmelCase )},
{'''generated_token_ids''': ANY(__lowerCAmelCase )},
],
[
{'''generated_token_ids''': ANY(__lowerCAmelCase )},
{'''generated_token_ids''': ANY(__lowerCAmelCase )},
],
] , )
@require_tf
def _lowerCAmelCase( self ) -> str:
lowercase__ : Union[str, Any] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowercase__ : Any = text_generator('''This is a test''' , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowercase__ : int = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]:
lowercase__ : str = TextGenerationPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
return text_generator, ["This is a test", "Another test"]
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : Tuple = '''Hello I believe in'''
lowercase__ : Optional[int] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
lowercase__ : Tuple = text_generator(__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
lowercase__ : str = text_generator(__lowerCAmelCase , stop_sequence=''' fe''' )
self.assertEqual(__lowerCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
lowercase__ : Tuple = text_generator.model
lowercase__ : Dict = text_generator.tokenizer
lowercase__ : List[str] = text_generator('''This is a test''' )
self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowercase__ : Any = text_generator('''This is a test''' , return_full_text=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowercase__ : List[str] = pipeline(task='''text-generation''' , model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , return_full_text=__lowerCAmelCase )
lowercase__ : Tuple = text_generator('''This is a test''' )
self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowercase__ : List[str] = text_generator('''This is a test''' , return_full_text=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowercase__ : Tuple = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}],
[{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowercase__ : Optional[Any] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}],
[{'''generated_text''': ANY(__lowerCAmelCase )}, {'''generated_text''': ANY(__lowerCAmelCase )}],
] , )
with self.assertRaises(__lowerCAmelCase ):
lowercase__ : Tuple = text_generator('''test''' , return_full_text=__lowerCAmelCase , return_text=__lowerCAmelCase )
with self.assertRaises(__lowerCAmelCase ):
lowercase__ : Union[str, Any] = text_generator('''test''' , return_full_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
with self.assertRaises(__lowerCAmelCase ):
lowercase__ : Tuple = text_generator('''test''' , return_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowercase__ : List[str] = text_generator('''''' )
self.assertEqual(__lowerCAmelCase , [{'''generated_text''': ANY(__lowerCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowercase__ : Dict = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowercase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
lowercase__ : str = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__lowerCAmelCase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _lowerCAmelCase( self ) -> Optional[int]:
import torch
# Classic `model_kwargs`
lowercase__ : str = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowercase__ : Optional[int] = pipe('''This is a test''' )
self.assertEqual(
__lowerCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowercase__ : List[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowercase__ : int = pipe('''This is a test''' )
self.assertEqual(
__lowerCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowercase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowercase__ : Optional[int] = pipe('''This is a test''' )
self.assertEqual(
__lowerCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def _lowerCAmelCase( self ) -> Dict:
import torch
lowercase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def _lowerCAmelCase( self ) -> List[str]:
import torch
lowercase__ : int = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=__lowerCAmelCase , top_p=0.5 )
def _lowerCAmelCase( self ) -> Optional[Any]:
lowercase__ : Union[str, Any] = '''Hello world'''
lowercase__ : Optional[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
lowercase__ : List[str] = logging.get_logger('''transformers.generation.tf_utils''' )
else:
lowercase__ : List[str] = logging.get_logger('''transformers.generation.utils''' )
lowercase__ : Any = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__lowerCAmelCase ) as cl:
lowercase__ : Dict = text_generator(__lowerCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__lowerCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__lowerCAmelCase ) as cl:
lowercase__ : Union[str, Any] = text_generator(__lowerCAmelCase , max_new_tokens=1 )
self.assertNotIn(__lowerCAmelCase , cl.out )
with CaptureLogger(__lowerCAmelCase ) as cl:
lowercase__ : str = text_generator(__lowerCAmelCase , max_length=10 )
self.assertNotIn(__lowerCAmelCase , cl.out )
| 428 | 1 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , 'embed_dim' ) )
self.parent.assertTrue(hasattr(lowerCAmelCase_ , 'num_heads' ) )
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any]=1_3 , lowerCAmelCase_ : List[Any]=6_4 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : int=[1_6, 4_8, 9_6] , lowerCAmelCase_ : Optional[Any]=[1, 3, 6] , lowerCAmelCase_ : Dict=[1, 2, 1_0] , lowerCAmelCase_ : str=[7, 3, 3] , lowerCAmelCase_ : int=[4, 2, 2] , lowerCAmelCase_ : Union[str, Any]=[2, 1, 1] , lowerCAmelCase_ : Union[str, Any]=[2, 2, 2] , lowerCAmelCase_ : Dict=[False, False, True] , lowerCAmelCase_ : Union[str, Any]=[0.0, 0.0, 0.0] , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : Optional[int]=1e-12 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=2 , ) -> str:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_sizes
__lowerCAmelCase = patch_stride
__lowerCAmelCase = patch_padding
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_channels
__lowerCAmelCase = embed_dim
__lowerCAmelCase = num_heads
__lowerCAmelCase = stride_kv
__lowerCAmelCase = depth
__lowerCAmelCase = cls_token
__lowerCAmelCase = attention_drop_rate
__lowerCAmelCase = initializer_range
__lowerCAmelCase = layer_norm_eps
def lowercase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
# create a random int32 tensor of given shape
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def lowercase ( self : Tuple ) -> List[str]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowercase ( self : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ) -> str:
__lowerCAmelCase = TFCvtModel(config=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , training=lowerCAmelCase_ )
__lowerCAmelCase = (self.image_size, self.image_size)
__lowerCAmelCase , __lowerCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__lowerCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__lowerCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowercase ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] ) -> int:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = TFCvtForImageClassification(lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Optional[int] ) -> int:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
a_ = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def lowercase ( self : Optional[int] ) -> Optional[int]:
__lowerCAmelCase = TFCvtModelTester(self )
__lowerCAmelCase = TFCvtConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=3_7 )
def lowercase ( self : Dict ) -> str:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
def lowercase ( self : Tuple ) -> Optional[Any]:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowercase ( self : List[Any] ) -> Any:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowercase ( self : Optional[Any] ) -> List[Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
def lowercase ( self : List[str] ) -> Optional[int]:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def lowercase ( self : Optional[Any] ) -> List[str]:
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
def lowercase ( self : List[Any] ) -> List[Any]:
__lowerCAmelCase = tf.keras.mixed_precision.Policy('mixed_float16' )
tf.keras.mixed_precision.set_global_policy(lowerCAmelCase_ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
def lowercase ( self : List[str] ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def lowercase ( self : str ) -> str:
def check_hidden_states_output(lowerCAmelCase_ : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : int ):
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase ( self : List[str] ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Optional[int] ) -> Optional[int]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def lowercase ( self : Dict ) -> Any:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = TFCvtModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def a_ ( ):
__lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase ( self : str ) -> Any:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase ( self : List[Any] ) -> Union[str, Any]:
__lowerCAmelCase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors='tf' )
# forward pass
__lowerCAmelCase = model(**lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = tf.constant([0.92_85, 0.90_15, -0.31_50] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCAmelCase_ , atol=1e-4 ) )
| 53 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Optional[int]=8 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=9_9 , lowerCAmelCase_ : List[Any]=1_6 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : str=3_6 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : str=5_1_2 , lowerCAmelCase_ : List[str]=1_6 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Tuple=0.02 , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : List[str]=None , ) -> List[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = seq_length
__lowerCAmelCase = is_training
__lowerCAmelCase = use_input_mask
__lowerCAmelCase = use_token_type_ids
__lowerCAmelCase = use_labels
__lowerCAmelCase = vocab_size
__lowerCAmelCase = hidden_size
__lowerCAmelCase = num_hidden_layers
__lowerCAmelCase = num_attention_heads
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = type_vocab_size
__lowerCAmelCase = type_sequence_label_size
__lowerCAmelCase = initializer_range
__lowerCAmelCase = num_labels
__lowerCAmelCase = num_choices
__lowerCAmelCase = scope
def lowercase ( self : Optional[int] ) -> Dict:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase = None
if self.use_input_mask:
__lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase = None
if self.use_token_type_ids:
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : Any ) -> Union[str, Any]:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def lowercase ( self : Dict ) -> List[Any]:
__lowerCAmelCase = self.get_config()
__lowerCAmelCase = 3_0_0
return config
def lowercase ( self : Optional[int] ) -> Union[str, Any]:
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = self.prepare_config_and_inputs()
__lowerCAmelCase = True
__lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowercase ( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = MraModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
__lowerCAmelCase = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , ) -> Tuple:
__lowerCAmelCase = True
__lowerCAmelCase = MraModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> List[str]:
__lowerCAmelCase = MraForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> str:
__lowerCAmelCase = MraForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MraForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict ) -> Any:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = MraForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
__lowerCAmelCase = self.num_choices
__lowerCAmelCase = MraForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self : Tuple ) -> Optional[Any]:
__lowerCAmelCase = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = config_and_inputs
__lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = ()
def lowercase ( self : List[Any] ) -> Optional[Any]:
__lowerCAmelCase = MraModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=3_7 )
def lowercase ( self : Tuple ) -> List[str]:
self.config_tester.run_common_tests()
def lowercase ( self : Optional[int] ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : int ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCAmelCase = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowercase ( self : Any ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def lowercase ( self : List[str] ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def lowercase ( self : Dict ) -> Optional[Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def lowercase ( self : Tuple ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def lowercase ( self : Optional[int] ) -> Optional[int]:
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = MraModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@unittest.skip(reason='MRA does not output attentions' )
def lowercase ( self : Optional[int] ) -> Tuple:
return
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Optional[Any] ) -> List[str]:
__lowerCAmelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
__lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : int ) -> Optional[int]:
__lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
__lowerCAmelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = 5_0_2_6_5
__lowerCAmelCase = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def lowercase ( self : Any ) -> List[str]:
__lowerCAmelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
__lowerCAmelCase = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
__lowerCAmelCase = model(lowerCAmelCase_ )[0]
__lowerCAmelCase = 5_0_2_6_5
__lowerCAmelCase = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor(
[[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
| 53 | 1 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCamelCase__ ( a , a=1 ):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def lowerCamelCase__ ( a , a=0 ):
__snake_case = []
for old_item in old_list:
__snake_case = old_item.replace('in_layers.0' , 'norm1' )
__snake_case = new_item.replace('in_layers.2' , 'conv1' )
__snake_case = new_item.replace('out_layers.0' , 'norm2' )
__snake_case = new_item.replace('out_layers.3' , 'conv2' )
__snake_case = new_item.replace('emb_layers.1' , 'time_emb_proj' )
__snake_case = new_item.replace('skip_connection' , 'conv_shortcut' )
__snake_case = shave_segments(a , n_shave_prefix_segments=a )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def lowerCamelCase__ ( a , a=0 ):
__snake_case = []
for old_item in old_list:
__snake_case = old_item
__snake_case = new_item.replace('norm.weight' , 'group_norm.weight' )
__snake_case = new_item.replace('norm.bias' , 'group_norm.bias' )
__snake_case = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
__snake_case = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
__snake_case = shave_segments(a , n_shave_prefix_segments=a )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def lowerCamelCase__ ( a , a , a , a=None , a=None , a=None ):
assert isinstance(a , a ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
__snake_case = old_checkpoint[path]
__snake_case = old_tensor.shape[0] // 3
__snake_case = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
__snake_case = old_tensor.shape[0] // config['num_head_channels'] // 3
__snake_case = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
__snake_case , __snake_case , __snake_case = old_tensor.split(channels // num_heads , dim=1 )
__snake_case = query.reshape(a )
__snake_case = key.reshape(a )
__snake_case = value.reshape(a )
for path in paths:
__snake_case = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
__snake_case = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
__snake_case = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
__snake_case = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
__snake_case = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
__snake_case = old_checkpoint[path['old']][:, :, 0]
else:
__snake_case = old_checkpoint[path['old']]
def lowerCamelCase__ ( a , a ):
__snake_case = {}
__snake_case = checkpoint['time_embed.0.weight']
__snake_case = checkpoint['time_embed.0.bias']
__snake_case = checkpoint['time_embed.2.weight']
__snake_case = checkpoint['time_embed.2.bias']
__snake_case = checkpoint['input_blocks.0.0.weight']
__snake_case = checkpoint['input_blocks.0.0.bias']
__snake_case = checkpoint['out.0.weight']
__snake_case = checkpoint['out.0.bias']
__snake_case = checkpoint['out.2.weight']
__snake_case = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
__snake_case = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
__snake_case = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(a )
}
# Retrieves the keys for the middle blocks only
__snake_case = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
__snake_case = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(a )
}
# Retrieves the keys for the output blocks only
__snake_case = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
__snake_case = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(a )
}
for i in range(1 , a ):
__snake_case = (i - 1) // (config['num_res_blocks'] + 1)
__snake_case = (i - 1) % (config['num_res_blocks'] + 1)
__snake_case = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
__snake_case = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
__snake_case = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
__snake_case = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
__snake_case = renew_resnet_paths(a )
__snake_case = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
__snake_case = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
a , a , a , additional_replacements=[meta_path, resnet_op] , config=a )
if len(a ):
__snake_case = renew_attention_paths(a )
__snake_case = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
__snake_case = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
a , a , a , additional_replacements=[meta_path] , attention_paths_to_split=a , config=a , )
__snake_case = middle_blocks[0]
__snake_case = middle_blocks[1]
__snake_case = middle_blocks[2]
__snake_case = renew_resnet_paths(a )
assign_to_checkpoint(a , a , a , config=a )
__snake_case = renew_resnet_paths(a )
assign_to_checkpoint(a , a , a , config=a )
__snake_case = renew_attention_paths(a )
__snake_case = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
a , a , a , attention_paths_to_split=a , config=a )
for i in range(a ):
__snake_case = i // (config['num_res_blocks'] + 1)
__snake_case = i % (config['num_res_blocks'] + 1)
__snake_case = [shave_segments(a , 2 ) for name in output_blocks[i]]
__snake_case = {}
for layer in output_block_layers:
__snake_case , __snake_case = layer.split('.' )[0], shave_segments(a , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(a )
else:
__snake_case = [layer_name]
if len(a ) > 1:
__snake_case = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
__snake_case = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
__snake_case = renew_resnet_paths(a )
__snake_case = renew_resnet_paths(a )
__snake_case = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(a , a , a , additional_replacements=[meta_path] , config=a )
if ["conv.weight", "conv.bias"] in output_block_list.values():
__snake_case = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
__snake_case = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
__snake_case = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(a ) == 2:
__snake_case = []
if len(a ):
__snake_case = renew_attention_paths(a )
__snake_case = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
__snake_case = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
a , a , a , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=a , )
else:
__snake_case = renew_resnet_paths(a , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
__snake_case = '.'.join(['output_blocks', str(a ), path['old']] )
__snake_case = '.'.join(['up_blocks', str(a ), 'resnets', str(a ), path['new']] )
__snake_case = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_lowercase = parser.parse_args()
_lowercase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowercase = json.loads(f.read())
_lowercase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowercase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowercase = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 427 |
'''simple docstring'''
from __future__ import annotations
_lowercase = """Muhammad Umer Farooq"""
_lowercase = """MIT"""
_lowercase = """1.0.0"""
_lowercase = """Muhammad Umer Farooq"""
_lowercase = """contact@muhammadumerfarooq.me"""
_lowercase = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class a_ ( UpperCAmelCase__ ):
def __init__( self : int , __lowerCAmelCase : str ):
super().__init__()
__snake_case = []
__snake_case = domain
def lowercase__ ( self : int , __lowerCAmelCase : str , __lowerCAmelCase : list[tuple[str, str | None]] ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__snake_case = parse.urljoin(self.domain , __lowerCAmelCase )
self.urls.append(__lowerCAmelCase )
def lowerCamelCase__ ( a ):
return ".".join(get_sub_domain_name(a ).split('.' )[-2:] )
def lowerCamelCase__ ( a ):
return parse.urlparse(a ).netloc
def lowerCamelCase__ ( a = "https://github.com" ):
__snake_case = get_domain_name(a )
# Initialize the parser
__snake_case = Parser(a )
try:
# Open URL
__snake_case = requests.get(a )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__snake_case = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__snake_case = requests.get(a )
# Get the valid email.
__snake_case = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(a )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(a )
if __name__ == "__main__":
_lowercase = emails_from_url("""https://github.com""")
print(f'''{len(emails)} emails found:''')
print("""\n""".join(sorted(emails)))
| 427 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= 'mobilenet_v2'
def __init__( self : Optional[Any] , _lowercase : List[Any]=3 , _lowercase : Dict=2_24 , _lowercase : str=1.0 , _lowercase : Any=8 , _lowercase : Any=8 , _lowercase : Tuple=6 , _lowercase : Optional[Any]=32 , _lowercase : List[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]="relu6" , _lowercase : Tuple=True , _lowercase : List[str]=0.8 , _lowercase : List[Any]=0.0_2 , _lowercase : Union[str, Any]=0.0_0_1 , _lowercase : Optional[int]=2_55 , **_lowercase : List[Any] , ):
"""simple docstring"""
super().__init__(**_lowercase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = image_size
UpperCAmelCase__ = depth_multiplier
UpperCAmelCase__ = depth_divisible_by
UpperCAmelCase__ = min_depth
UpperCAmelCase__ = expand_ratio
UpperCAmelCase__ = output_stride
UpperCAmelCase__ = first_layer_is_expansion
UpperCAmelCase__ = finegrained_output
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = tf_padding
UpperCAmelCase__ = classifier_dropout_prob
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = semantic_loss_ignore_index
class lowercase__ ( __SCREAMING_SNAKE_CASE ):
A__= version.parse('1.11' )
@property
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
return 1E-4
| 475 |
import math
import unittest
def __UpperCAmelCase ( __A ) -> bool:
'''simple docstring'''
assert isinstance(__A , __A ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class lowercase__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def _UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaises(_lowercase ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , )
self.assertFalse(
is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 475 | 1 |
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def snake_case__ ( _A: Any="ro" , _A: Optional[int]="en" , _A: str="wmt16" , _A: Optional[int]=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("""run pip install datasets""" )
lowerCAmelCase = f"{src_lang}-{tgt_lang}"
print(f"Converting {dataset}-{pair}" )
lowerCAmelCase = datasets.load_dataset(__UpperCamelCase , __UpperCamelCase )
if save_dir is None:
lowerCAmelCase = f"{dataset}-{pair}"
lowerCAmelCase = Path(__UpperCamelCase )
save_dir.mkdir(exist_ok=__UpperCamelCase )
for split in ds.keys():
print(f"Splitting {split} with {ds[split].num_rows} records" )
# to save to val.source, val.target like summary datasets
lowerCAmelCase = """val""" if split == """validation""" else split
lowerCAmelCase = save_dir.joinpath(f"{fn}.source" )
lowerCAmelCase = save_dir.joinpath(f"{fn}.target" )
lowerCAmelCase = src_path.open("""w+""" )
lowerCAmelCase = tgt_path.open("""w+""" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowerCAmelCase = x["""translation"""]
src_fp.write(ex[src_lang] + """\n""" )
tgt_fp.write(ex[tgt_lang] + """\n""" )
print(f"Saved {dataset} dataset to {save_dir}" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 708 | '''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def snake_case__ ( _A: float ) -> float:
'''simple docstring'''
if num <= 0:
raise ValueError("""math domain error""" )
return quad(_A , 0 , _A , args=(_A) )[0]
def snake_case__ ( _A: float , _A: float ) -> float:
'''simple docstring'''
return math.pow(_A , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 605 | 0 |
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
UpperCamelCase_ = True
except ImportError:
UpperCamelCase_ = False
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase ( UpperCAmelCase__ : Namespace ) -> int:
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE ):
@staticmethod
def __lowerCamelCase ( UpperCamelCase_ : ArgumentParser ) -> List[Any]:
SCREAMING_SNAKE_CASE__ :Any = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' , action='store_true' , help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' , type=UpperCamelCase_ , help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' , type=UpperCamelCase_ , help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=UpperCamelCase_ )
def __init__( self : Optional[int] , UpperCamelCase_ : bool , UpperCamelCase_ : str , UpperCamelCase_ : Dict=None , *UpperCamelCase_ : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ :Optional[Any] = testing
SCREAMING_SNAKE_CASE__ :Optional[int] = testing_file
SCREAMING_SNAKE_CASE__ :Dict = path
def __lowerCamelCase ( self : str ) -> Tuple:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
SCREAMING_SNAKE_CASE__ :Optional[int] = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(UpperCamelCase_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
SCREAMING_SNAKE_CASE__ :Optional[Any] = (
Path(UpperCamelCase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
SCREAMING_SNAKE_CASE__ :Any = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(UpperCamelCase_ ) )
else:
with open(self._testing_file , 'r' ) as configuration_file:
SCREAMING_SNAKE_CASE__ :List[Any] = json.load(UpperCamelCase_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=UpperCamelCase_ , extra_context=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ :Any = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' , 'r' ) as configuration_file:
SCREAMING_SNAKE_CASE__ :Any = json.load(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :int = configuration['lowercase_modelname']
SCREAMING_SNAKE_CASE__ :Union[str, Any] = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(f'''{directory}/configuration.json''' )
SCREAMING_SNAKE_CASE__ :Dict = 'PyTorch' in generate_tensorflow_pytorch_and_flax
SCREAMING_SNAKE_CASE__ :List[str] = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
SCREAMING_SNAKE_CASE__ :Tuple = 'Flax' in generate_tensorflow_pytorch_and_flax
SCREAMING_SNAKE_CASE__ :Any = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=UpperCamelCase_ )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , 'w' ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(UpperCamelCase_ : Optional[int] ):
with open(UpperCamelCase_ , 'r' ) as f:
SCREAMING_SNAKE_CASE__ :Dict = f.readlines()
with open(UpperCamelCase_ , 'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(UpperCamelCase_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : List[str] ):
# Create temp file
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ :str = mkstemp()
SCREAMING_SNAKE_CASE__ :Any = False
with fdopen(UpperCamelCase_ , 'w' ) as new_file:
with open(UpperCamelCase_ ) as old_file:
for line in old_file:
new_file.write(UpperCamelCase_ )
if line_to_copy_below in line:
SCREAMING_SNAKE_CASE__ :Optional[int] = True
for line_to_copy in lines_to_copy:
new_file.write(UpperCamelCase_ )
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(UpperCamelCase_ , UpperCamelCase_ )
# Remove original file
remove(UpperCamelCase_ )
# Move new file
move(UpperCamelCase_ , UpperCamelCase_ )
def skip_units(UpperCamelCase_ : List[str] ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(UpperCamelCase_ : Optional[int] ):
with open(UpperCamelCase_ ) as datafile:
SCREAMING_SNAKE_CASE__ :Dict = []
SCREAMING_SNAKE_CASE__ :List[str] = False
SCREAMING_SNAKE_CASE__ :Tuple = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
SCREAMING_SNAKE_CASE__ :List[Any] = line.split('"' )[1]
SCREAMING_SNAKE_CASE__ :Dict = skip_units(UpperCamelCase_ )
elif "# Below: " in line and "##" not in line:
SCREAMING_SNAKE_CASE__ :List[Any] = line.split('"' )[1]
SCREAMING_SNAKE_CASE__ :Dict = skip_units(UpperCamelCase_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :int = []
elif "# Replace with" in line and "##" not in line:
SCREAMING_SNAKE_CASE__ :List[str] = []
elif "##" not in line:
lines_to_copy.append(UpperCamelCase_ )
remove(UpperCamelCase_ )
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(UpperCamelCase_ )
| 209 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase_ = {
'''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''],
'''tokenization_m2m_100''': ['''M2M100Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''M2M100ForConditionalGeneration''',
'''M2M100Model''',
'''M2M100PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 209 | 1 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _lowerCAmelCase ( *lowerCamelCase__ : Dict, lowerCamelCase__ : Optional[Union[Dict, Any]] = None, lowerCamelCase__ : Optional[int]=True, lowerCamelCase__ : Dict=2 ) -> Dict:
from .. import __version__
_SCREAMING_SNAKE_CASE : int = take_from
_SCREAMING_SNAKE_CASE : Any = ()
if not isinstance(args[0], lowerCamelCase__ ):
_SCREAMING_SNAKE_CASE : str = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(lowerCamelCase__ ).base_version ) >= version.parse(lowerCamelCase__ ):
raise ValueError(
f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
f''' version {__version__} is >= {version_name}''' )
_SCREAMING_SNAKE_CASE : List[str] = None
if isinstance(lowerCamelCase__, lowerCamelCase__ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(lowerCamelCase__ ),)
_SCREAMING_SNAKE_CASE : List[Any] = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
elif hasattr(lowerCamelCase__, lowerCamelCase__ ):
values += (getattr(lowerCamelCase__, lowerCamelCase__ ),)
_SCREAMING_SNAKE_CASE : Optional[Any] = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
elif deprecated_kwargs is None:
_SCREAMING_SNAKE_CASE : int = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
if warning is not None:
_SCREAMING_SNAKE_CASE : List[Any] = warning + " " if standard_warn else ""
warnings.warn(warning + message, lowerCamelCase__, stacklevel=lowerCamelCase__ )
if isinstance(lowerCamelCase__, lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
_SCREAMING_SNAKE_CASE : Dict = inspect.getouterframes(inspect.currentframe() )[1]
_SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.filename
_SCREAMING_SNAKE_CASE : List[str] = call_frame.lineno
_SCREAMING_SNAKE_CASE : List[Any] = call_frame.function
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = next(iter(deprecated_kwargs.items() ) )
raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
if len(lowerCamelCase__ ) == 0:
return
elif len(lowerCamelCase__ ) == 1:
return values[0]
return values
| 295 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowercase_ : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
A__ = 10000
A__ = None
A__ = None
class UpperCamelCase ( datasets.ArrowBasedBuilder ):
A__ = ParquetConfig
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_SCREAMING_SNAKE_CASE : List[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case__ , (str, list, tuple) ):
_SCREAMING_SNAKE_CASE : int = data_files
if isinstance(snake_case__ , snake_case__ ):
_SCREAMING_SNAKE_CASE : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_SCREAMING_SNAKE_CASE : Optional[Any] = [dl_manager.iter_files(snake_case__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_SCREAMING_SNAKE_CASE : Tuple = []
for split_name, files in data_files.items():
if isinstance(snake_case__ , snake_case__ ):
_SCREAMING_SNAKE_CASE : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_SCREAMING_SNAKE_CASE : Tuple = [dl_manager.iter_files(snake_case__ ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(snake_case__ ):
with open(snake_case__ , "rb" ) as f:
_SCREAMING_SNAKE_CASE : Optional[int] = datasets.Features.from_arrow_schema(pq.read_schema(snake_case__ ) )
break
splits.append(datasets.SplitGenerator(name=snake_case__ , gen_kwargs={"files": files} ) )
return splits
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_SCREAMING_SNAKE_CASE : Union[str, Any] = table_cast(snake_case__ , self.info.features.arrow_schema )
return pa_table
def __SCREAMING_SNAKE_CASE ( self , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' )
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case__ ) ):
with open(snake_case__ , "rb" ) as f:
_SCREAMING_SNAKE_CASE : List[Any] = pq.ParquetFile(snake_case__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_SCREAMING_SNAKE_CASE : int = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'''{file_idx}_{batch_idx}''', self._cast_table(snake_case__ )
except ValueError as e:
logger.error(F'''Failed to read file \'{file}\' with error {type(snake_case__ )}: {e}''' )
raise
| 295 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
snake_case : int = (3, 9, -11, 0, 7, 5, 1, -1)
snake_case : Optional[int] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class snake_case_ :
UpperCAmelCase__ : int
UpperCAmelCase__ : Node | None
class snake_case_ :
def __init__( self :Optional[int] ,__snake_case :Iterable[int] ) -> None:
a__ = None
for i in sorted(__snake_case ,reverse=__snake_case ):
a__ = Node(__snake_case ,self.head )
def __iter__( self :Optional[Any] ) -> Iterator[int]:
a__ = self.head
while node:
yield node.data
a__ = node.next_node
def __len__( self :Union[str, Any] ) -> int:
return sum(1 for _ in self )
def __str__( self :Optional[Any] ) -> str:
return " -> ".join([str(__snake_case ) for node in self] )
def __lowercase ( __lowerCAmelCase : SortedLinkedList , __lowerCAmelCase : SortedLinkedList ):
return SortedLinkedList(list(__lowerCAmelCase ) + list(__lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : List[Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 335 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Path , __lowerCAmelCase : str = None , __lowerCAmelCase : str = None , __lowerCAmelCase : str = None , ):
if config_name_or_path is None:
a__ = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
if generator_tokenizer_name_or_path is None:
a__ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
a__ = question_encoder_name_or_path
a__ = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
# Save model.
a__ = RagConfig.from_pretrained(__lowerCAmelCase )
a__ = AutoConfig.from_pretrained(__lowerCAmelCase )
a__ = AutoConfig.from_pretrained(__lowerCAmelCase )
a__ = gen_config
a__ = question_encoder_config
a__ = model_class.from_pretrained_question_encoder_generator(
__lowerCAmelCase , __lowerCAmelCase , config=__lowerCAmelCase )
rag_model.save_pretrained(__lowerCAmelCase )
# Sanity check.
model_class.from_pretrained(__lowerCAmelCase )
# Save tokenizers.
a__ = AutoTokenizer.from_pretrained(__lowerCAmelCase )
gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
a__ = AutoTokenizer.from_pretrained(__lowerCAmelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
snake_case : Union[str, Any] = parser.parse_args()
snake_case : Dict = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 335 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=3 , lowercase__=32 , lowercase__=3 , lowercase__=10 , lowercase__=[10, 20, 30, 40] , lowercase__=[1, 1, 2, 1] , lowercase__=True , lowercase__=True , lowercase__="relu" , lowercase__=3 , lowercase__=None , ) -> List[Any]:
"""simple docstring"""
_snake_case : Union[str, Any] = parent
_snake_case : Optional[Any] = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : Dict = num_channels
_snake_case : Optional[Any] = embeddings_size
_snake_case : List[Any] = hidden_sizes
_snake_case : List[str] = depths
_snake_case : str = is_training
_snake_case : int = use_labels
_snake_case : str = hidden_act
_snake_case : Optional[int] = num_labels
_snake_case : Any = scope
_snake_case : str = len(_snake_case )
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
_snake_case : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : List[Any] = None
if self.use_labels:
_snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_snake_case : Any = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
"""simple docstring"""
_snake_case : List[Any] = TFResNetModel(config=_snake_case )
_snake_case : List[Any] = model(_snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[int] = self.num_labels
_snake_case : List[str] = TFResNetForImageClassification(_snake_case )
_snake_case : Optional[int] = model(_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case : Dict = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case : str = config_and_inputs
_snake_case : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase (UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
_lowercase : Dict = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
_lowercase : List[str] = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
_lowercase : Any = False
_lowercase : Dict = False
_lowercase : Optional[Any] = False
_lowercase : str = False
_lowercase : str = False
def UpperCAmelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
_snake_case : str = TFResNetModelTester(self )
_snake_case : Optional[int] = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case )
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ) -> List[str]:
"""simple docstring"""
return
@unittest.skip(reason='''ResNet does not use inputs_embeds''' )
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ResNet does not support input and output embeddings''' )
def UpperCAmelCase_ ( self ) -> Dict:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
_snake_case , _snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : str = model_class(_snake_case )
_snake_case : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Optional[int] = [*signature.parameters.keys()]
_snake_case : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case )
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
_snake_case : Tuple = model_class(_snake_case )
_snake_case : Dict = model(**self._prepare_for_class(_snake_case , _snake_case ) )
_snake_case : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_snake_case : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(_snake_case ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : int = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_snake_case : str = layer_type
_snake_case : Dict = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_snake_case : str = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase_ ( self ) -> str:
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : List[str] = TFResNetModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _a ( ):
"""simple docstring"""
_snake_case : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase (unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_snake_case : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_snake_case : Optional[int] = self.default_image_processor
_snake_case : int = prepare_img()
_snake_case : Optional[Any] = image_processor(images=_snake_case , return_tensors='''tf''' )
# forward pass
_snake_case : Dict = model(**_snake_case )
# verify the logits
_snake_case : List[str] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _snake_case )
_snake_case : Any = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _snake_case , atol=1E-4 ) )
| 707 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase (unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self ) -> int:
"""simple docstring"""
_snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' )
_snake_case : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' )
_snake_case : List[str] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
_snake_case : Dict = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
_snake_case : Any = shift_tokens_right(lowercase__ , model.config.pad_token_id , model.config.decoder_start_token_id )
_snake_case : Any = model(lowercase__ , decoder_input_ids=lowercase__ ).logits
_snake_case : Tuple = optax.softmax_cross_entropy(lowercase__ , onehot(lowercase__ , logits.shape[-1] ) ).mean()
_snake_case : Tuple = -(labels.shape[-1] * loss.item())
_snake_case : Union[str, Any] = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 47 | 0 |
import pprint
import requests
lowercase_ = 'https://zenquotes.io/api'
def UpperCamelCase__ ( ):
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def UpperCamelCase__ ( ):
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
lowercase_ = random_quotes()
pprint.pprint(response)
| 669 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ConsistencyModelPipeline
__snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__snake_case = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
@property
def _snake_case ( self: str ):
__lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _snake_case ( self: Tuple ):
__lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _snake_case ( self: int , a: str=False ):
if class_cond:
__lowerCamelCase : str = self.dummy_cond_unet
else:
__lowerCamelCase : str = self.dummy_uncond_unet
# Default to CM multistep sampler
__lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _snake_case ( self: int , a: List[str] , a: Any=0 ):
if str(a ).startswith('mps' ):
__lowerCamelCase : List[Any] = torch.manual_seed(a )
else:
__lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
__lowerCamelCase : Optional[Any] = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components()
__lowerCamelCase : str = ConsistencyModelPipeline(**a )
__lowerCamelCase : str = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Any = self.get_dummy_inputs(a )
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
__lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
__lowerCamelCase : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
__lowerCamelCase : Tuple = 0
__lowerCamelCase : List[str] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : Dict = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Optional[int] = self.get_dummy_components()
__lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
__lowerCamelCase : Union[str, Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : Tuple = self.get_dummy_inputs(a )
__lowerCamelCase : str = 1
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Any = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _snake_case ( self: List[str] ):
__lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
__lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
__lowerCamelCase : List[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : List[str] = self.get_dummy_inputs(a )
__lowerCamelCase : List[str] = 1
__lowerCamelCase : List[str] = None
__lowerCamelCase : str = 0
__lowerCamelCase : Tuple = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
__lowerCamelCase : Optional[Any] = torch.manual_seed(a )
__lowerCamelCase : Optional[int] = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
__lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
__lowerCamelCase : Optional[Any] = latents
return inputs
def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
if type(a ) == str:
__lowerCamelCase : Dict = torch.device(a )
__lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
__lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
return latents
def _snake_case ( self: str ):
__lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : int = self.get_inputs()
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _snake_case ( self: Optional[int] ):
__lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : List[str] = self.get_inputs()
__lowerCamelCase : Optional[Any] = 1
__lowerCamelCase : Dict = None
__lowerCamelCase : Union[str, Any] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def _snake_case ( self: List[str] ):
__lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
__lowerCamelCase : int = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def _snake_case ( self: Dict ):
__lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
__lowerCamelCase : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
__lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
__lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
__lowerCamelCase : str = 1
__lowerCamelCase : Union[str, Any] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
__lowerCamelCase : Optional[int] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : str = image[0, -3:, -3:, -1]
__lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 669 | 1 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
A : Tuple = mf_knapsack(i - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
A : Union[str, Any] = max(
mf_knapsack(i - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , mf_knapsack(i - 1 , lowerCamelCase_ , lowerCamelCase_ , j - wt[i - 1] ) + val[i - 1] , )
A : List[str] = val
return f[i][j]
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Any = [[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
A : Tuple = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
A : str = dp[i - 1][w_]
return dp[n][w_], dp
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if not (isinstance(lowerCamelCase_ , (list, tuple) ) and isinstance(lowerCamelCase_ , (list, tuple) )):
raise ValueError(
'''Both the weights and values vectors must be either lists or tuples''' )
A : Tuple = len(lowerCamelCase_ )
if num_items != len(lowerCamelCase_ ):
A : Union[str, Any] = (
'The number of weights must be the same as the number of values.\n'
F'But got {num_items} weights and {len(lowerCamelCase_ )} values'
)
raise ValueError(lowerCamelCase_ )
for i in range(lowerCamelCase_ ):
if not isinstance(wt[i] , lowerCamelCase_ ):
A : str = (
'All weights must be integers but got weight of '
F'type {type(wt[i] )} at index {i}'
)
raise TypeError(lowerCamelCase_ )
A : List[Any] = knapsack(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
A : set = set()
_construct_solution(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return optimal_val, example_optional_set
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(lowerCamelCase_ , lowerCamelCase_ , i - 1 , lowerCamelCase_ , lowerCamelCase_ )
else:
optimal_set.add(lowerCamelCase_ )
_construct_solution(lowerCamelCase_ , lowerCamelCase_ , i - 1 , j - wt[i - 1] , lowerCamelCase_ )
if __name__ == "__main__":
lowercase : Union[str, Any] = [3, 2, 4, 4]
lowercase : Optional[int] = [4, 3, 2, 3]
lowercase : Union[str, Any] = 4
lowercase : Any = 6
lowercase : Optional[Any] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
lowercase , lowercase : Optional[int] = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
lowercase , lowercase : List[str] = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
| 706 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class A ( __snake_case , unittest.TestCase ):
__magic_name__ = CpmAntTokenizer
__magic_name__ = False
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
super().setUp()
A : Optional[int] = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
A : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Union[str, Any] = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
A : List[str] = '''今天天气真好!'''
A : Union[str, Any] = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
A : List[str] = tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A : Optional[Any] = '''今天天气真好!'''
A : List[str] = [tokenizer.bos_token] + tokens
A : int = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
A : str = tokenizer.decode(SCREAMING_SNAKE_CASE )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
| 343 | 0 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowercase_ : List[str] = '''pytorch_model.bin'''
lowercase_ : Any = '''pytorch_model.bin.index.json'''
lowercase_ : Optional[Any] = '''adapter_config.json'''
lowercase_ : str = '''adapter_model.bin'''
lowercase_ : Any = '''adapter_model.safetensors'''
lowercase_ : Optional[Any] = '''tf_model.h5'''
lowercase_ : List[str] = '''tf_model.h5.index.json'''
lowercase_ : Optional[Any] = '''model.ckpt'''
lowercase_ : List[str] = '''flax_model.msgpack'''
lowercase_ : Any = '''flax_model.msgpack.index.json'''
lowercase_ : Optional[int] = '''model.safetensors'''
lowercase_ : Tuple = '''model.safetensors.index.json'''
lowercase_ : Optional[int] = '''config.json'''
lowercase_ : Union[str, Any] = '''preprocessor_config.json'''
lowercase_ : int = FEATURE_EXTRACTOR_NAME
lowercase_ : List[str] = '''generation_config.json'''
lowercase_ : List[str] = '''modelcard.json'''
lowercase_ : Optional[Any] = '''▁'''
lowercase_ : List[str] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowercase_ : Optional[Any] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowercase_ : Optional[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowercase_ : List[str] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def A__( __lowerCAmelCase ):
if version.parse(__lowerCAmelCase ) < version.parse(__lowerCAmelCase ):
if "dev" in min_version:
_snake_case : List[Any] = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
_snake_case : Tuple = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
| 304 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def A__( __lowerCAmelCase ):
_snake_case : Dict = [
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A__( __lowerCAmelCase ):
_snake_case , _snake_case : Any = emb.weight.shape
_snake_case : List[str] = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
_snake_case : Optional[int] = emb.weight.data
return lin_layer
def A__( __lowerCAmelCase ):
_snake_case : List[str] = torch.load(__lowerCAmelCase , map_location='cpu' )
_snake_case : List[Any] = Namespace(**checkpoint['cfg']['model'] )
_snake_case : Optional[Any] = checkpoint['model']
remove_ignore_keys_(__lowerCAmelCase )
_snake_case : List[Any] = state_dict['decoder.embed_tokens.weight'].shape[0]
_snake_case : Dict = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
_snake_case : Optional[Any] = XGLMConfig(
vocab_size=__lowerCAmelCase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
_snake_case : Optional[Any] = XGLMForCausalLM(__lowerCAmelCase )
_snake_case : Any = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
print(__lowerCAmelCase )
_snake_case : Optional[Any] = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase_ : Optional[Any] = parser.parse_args()
lowercase_ : List[Any] = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 304 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Optional[Any] = '''encoder-decoder'''
_A : Optional[Any] = True
def __init__( self : List[str] , **__a : Optional[Any] ) -> Any:
"""simple docstring"""
super().__init__(**__a )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
__lowercase : List[Any] = kwargs.pop("""encoder""" )
__lowercase : List[str] = encoder_config.pop("""model_type""" )
__lowercase : Optional[Any] = kwargs.pop("""decoder""" )
__lowercase : Optional[Any] = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
__lowercase : int = AutoConfig.for_model(__a , **__a )
__lowercase : Dict = AutoConfig.for_model(__a , **__a )
__lowercase : Union[str, Any] = True
@classmethod
def lowerCAmelCase ( cls : Optional[int] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Tuple ) -> PretrainedConfig:
"""simple docstring"""
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
__lowercase : Dict = True
__lowercase : Tuple = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__a )
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : int = copy.deepcopy(self.__dict__ )
__lowercase : Optional[Any] = self.encoder.to_dict()
__lowercase : Optional[int] = self.decoder.to_dict()
__lowercase : Tuple = self.__class__.model_type
return output | 712 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCamelCase : Any = None
try:
import msvcrt
except ImportError:
lowerCamelCase : str = None
try:
import fcntl
except ImportError:
lowerCamelCase : Optional[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCamelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
lowerCamelCase : Tuple = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
lowerCamelCase : Tuple = '''3.0.12'''
lowerCamelCase : Any = None
def snake_case_ ( ):
global _logger
__lowercase : List[str] = _logger or logging.getLogger(__name__ )
return _logger
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Any , __a : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = lock_file
return None
def __str__( self : str ) -> Any:
"""simple docstring"""
__lowercase : Any = F"The file lock '{self.lock_file}' could not be acquired."
return temp
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = lock
return None
def __enter__( self : Dict ) -> Dict:
"""simple docstring"""
return self.lock
def __exit__( self : Optional[int] , __a : Dict , __a : Any , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.lock.release()
return None
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple , __a : Any , __a : Dict=-1 , __a : Optional[Any]=None ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__lowercase : Dict = self.hash_filename_if_too_long(__a , __a )
# The path to the lock file.
__lowercase : Optional[Any] = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__lowercase : int = None
# The default timeout value.
__lowercase : Optional[int] = timeout
# We use this lock primarily for the lock counter.
__lowercase : Optional[Any] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__lowercase : Union[str, Any] = 0
return None
@property
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._lock_file
@property
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self._timeout
@timeout.setter
def lowerCAmelCase ( self : Tuple , __a : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = float(__a )
return None
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
raise NotImplementedError()
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def lowerCAmelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self._lock_file_fd is not None
def lowerCAmelCase ( self : Any , __a : Optional[Any]=None , __a : Union[str, Any]=0.05 ) -> List[str]:
"""simple docstring"""
if timeout is None:
__lowercase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__lowercase : int = id(self )
__lowercase : Optional[Any] = self._lock_file
__lowercase : List[str] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"Attempting to acquire lock {lock_id} on {lock_filename}" )
self._acquire()
if self.is_locked:
logger().debug(F"Lock {lock_id} acquired on {lock_filename}" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"Timeout on acquiring lock {lock_id} on {lock_filename}" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..." )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__lowercase : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def lowerCAmelCase ( self : Union[str, Any] , __a : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__lowercase : Optional[Any] = id(self )
__lowercase : str = self._lock_file
logger().debug(F"Attempting to release lock {lock_id} on {lock_filename}" )
self._release()
__lowercase : List[str] = 0
logger().debug(F"Lock {lock_id} released on {lock_filename}" )
return None
def __enter__( self : Any ) -> Optional[Any]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , __a : str , __a : int , __a : List[Any] ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.release(force=__a )
return None
def lowerCAmelCase ( self : Tuple , __a : str , __a : int ) -> str:
"""simple docstring"""
__lowercase : List[Any] = os.path.basename(__a )
if len(__a ) > max_length and max_length > 0:
__lowercase : int = os.path.dirname(__a )
__lowercase : List[str] = str(hash(__a ) )
__lowercase : Optional[Any] = filename[: max_length - len(__a ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__a , __a )
else:
return path
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __a : List[Any] , __a : Optional[int]=-1 , __a : Tuple=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__a , timeout=__a , max_filename_length=__a )
__lowercase : Tuple = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__lowercase : Tuple = os.open(self._lock_file , __a )
except OSError:
pass
else:
try:
msvcrt.locking(__a , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__a )
else:
__lowercase : Union[str, Any] = fd
return None
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self._lock_file_fd
__lowercase : int = None
msvcrt.locking(__a , msvcrt.LK_UNLCK , 1 )
os.close(__a )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[Any] , __a : str=-1 , __a : List[str]=None ) -> Any:
"""simple docstring"""
__lowercase : Dict = os.statvfs(os.path.dirname(__a ) ).f_namemax
super().__init__(__a , timeout=__a , max_filename_length=__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__lowercase : List[str] = os.open(self._lock_file , __a )
try:
fcntl.flock(__a , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__a )
else:
__lowercase : str = fd
return None
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = self._lock_file_fd
__lowercase : List[str] = None
fcntl.flock(__a , fcntl.LOCK_UN )
os.close(__a )
return None
class lowerCAmelCase ( __a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__lowercase : Union[str, Any] = os.open(self._lock_file , __a )
except OSError:
pass
else:
__lowercase : Optional[int] = fd
return None
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
os.close(self._lock_file_fd )
__lowercase : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCamelCase : Optional[Any] = None
if msvcrt:
lowerCamelCase : List[Any] = WindowsFileLock
elif fcntl:
lowerCamelCase : List[Any] = UnixFileLock
else:
lowerCamelCase : Union[str, Any] = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''') | 649 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=a )
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =field(default='''image-classification''' ,metadata={'''include_in_asdict_even_if_is_default''': True} )
a__ =Features({'''image''': Image()} )
a__ =Features({'''labels''': ClassLabel} )
a__ ="image"
a__ ="labels"
def __lowerCAmelCase ( self , A ) -> List[Any]:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , A ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
_UpperCAmelCase : int = copy.deepcopy(self )
_UpperCAmelCase : str = self.label_schema.copy()
_UpperCAmelCase : Optional[Any] = features[self.label_column]
_UpperCAmelCase : int = label_schema
return task_template
@property
def __lowerCAmelCase ( self ) -> Dict[str, str]:
return {
self.image_column: "image",
self.label_column: "labels",
}
| 506 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( a ,unittest.TestCase ):
'''simple docstring'''
a__ =CodeGenTokenizer
a__ =CodeGenTokenizerFast
a__ =True
a__ ={'''add_prefix_space''': True}
a__ =False
def __lowerCAmelCase ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase : List[str] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
_UpperCAmelCase : str = dict(zip(A , range(len(A ) ) ) )
_UpperCAmelCase : Dict = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_UpperCAmelCase : Union[str, Any] = {'''unk_token''': '''<unk>'''}
_UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A ) )
def __lowerCAmelCase ( self , **A ) -> Tuple:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A )
def __lowerCAmelCase ( self , **A ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A )
def __lowerCAmelCase ( self , A ) -> Dict:
_UpperCAmelCase : List[Any] = '''lower newer'''
_UpperCAmelCase : Dict = '''lower newer'''
return input_text, output_text
def __lowerCAmelCase ( self ) -> Tuple:
_UpperCAmelCase : int = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase : Optional[int] = '''lower newer'''
_UpperCAmelCase : Tuple = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
_UpperCAmelCase : List[Any] = tokenizer.tokenize(A , add_prefix_space=A )
self.assertListEqual(A , A )
_UpperCAmelCase : Dict = tokens + [tokenizer.unk_token]
_UpperCAmelCase : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def __lowerCAmelCase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=A )
_UpperCAmelCase : Dict = '''lower newer'''
# Testing tokenization
_UpperCAmelCase : Optional[Any] = tokenizer.tokenize(A , add_prefix_space=A )
_UpperCAmelCase : Any = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
# Testing conversion to ids without special tokens
_UpperCAmelCase : Dict = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
_UpperCAmelCase : List[str] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
# Testing conversion to ids with special tokens
_UpperCAmelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=A )
_UpperCAmelCase : List[str] = tokenizer.encode(A , add_prefix_space=A )
_UpperCAmelCase : Tuple = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
# Testing the unknown token
_UpperCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_UpperCAmelCase : int = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A ) , A )
def __lowerCAmelCase ( self , *A , **A ) -> List[str]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __lowerCAmelCase ( self , A=1_5 ) -> List[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
# Simple input
_UpperCAmelCase : str = '''This is a simple input'''
_UpperCAmelCase : Tuple = ['''This is a simple input 1''', '''This is a simple input 2''']
_UpperCAmelCase : Optional[Any] = ('''This is a simple input''', '''This is a pair''')
_UpperCAmelCase : str = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' )
# Simple input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' )
# Simple input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , )
# Pair input
self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' )
# Pair input
self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' )
# Pair input
self.assertRaises(
A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , )
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
_UpperCAmelCase : Optional[int] = '''This is a simple input'''
_UpperCAmelCase : Dict = ['''This is a simple input looooooooong''', '''This is a simple input''']
_UpperCAmelCase : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
_UpperCAmelCase : Optional[Any] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
_UpperCAmelCase : List[str] = tokenizer.pad_token_id
_UpperCAmelCase : Tuple = tokenizer(A , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' )
_UpperCAmelCase : Optional[Any] = tokenizer(A , padding=A , truncate=A , return_tensors='''np''' )
_UpperCAmelCase : int = tokenizer(*A , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' )
_UpperCAmelCase : List[str] = tokenizer(A , padding=A , truncate=A , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def __lowerCAmelCase ( self ) -> List[str]:
_UpperCAmelCase : Optional[int] = '''$$$'''
_UpperCAmelCase : Optional[int] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A , add_bos_token=A )
_UpperCAmelCase : Tuple = '''This is a simple input'''
_UpperCAmelCase : int = ['''This is a simple input 1''', '''This is a simple input 2''']
_UpperCAmelCase : List[str] = tokenizer.bos_token_id
_UpperCAmelCase : str = tokenizer(A )
_UpperCAmelCase : Optional[Any] = tokenizer(A )
self.assertEqual(out_s.input_ids[0] , A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_UpperCAmelCase : Tuple = tokenizer.decode(out_s.input_ids )
_UpperCAmelCase : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
_UpperCAmelCase : Any = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
_UpperCAmelCase : Union[str, Any] = '''\nif len_a > len_b: result = a\nelse: result = b'''
_UpperCAmelCase : Any = tokenizer.encode(A )
_UpperCAmelCase : Tuple = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
_UpperCAmelCase : List[str] = tokenizer.decode(A , truncate_before_pattern=A )
self.assertEqual(A , A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
| 506 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case_ : List[str] = ["""image_processor""", """tokenizer"""]
snake_case_ : str = """ChineseCLIPImageProcessor"""
snake_case_ : Tuple = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , **lowerCAmelCase : str) -> Optional[Any]:
"""simple docstring"""
_snake_case : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCAmelCase , )
_snake_case : Tuple = kwargs.pop("""feature_extractor""")
_snake_case : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(lowerCAmelCase , lowerCAmelCase)
_snake_case : Optional[int] = self.image_processor
def __call__( self : List[Any] , lowerCAmelCase : str=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , **lowerCAmelCase : Optional[int]) -> Optional[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""")
if text is not None:
_snake_case : Dict = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
if images is not None:
_snake_case : Any = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
if text is not None and images is not None:
_snake_case : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase) , tensor_type=lowerCAmelCase)
def UpperCamelCase_ ( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : Any) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase)
def UpperCamelCase_ ( self : Tuple , *lowerCAmelCase : Tuple , **lowerCAmelCase : int) -> str:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase)
@property
def UpperCamelCase_ ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
_snake_case : Dict = self.tokenizer.model_input_names
_snake_case : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def UpperCamelCase_ ( self : str) -> Tuple:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCAmelCase , )
return self.image_processor_class
| 716 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 198 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
# Construct model
if openai_config_file == "":
snake_case__ = OpenAIGPTConfig()
else:
snake_case__ = OpenAIGPTConfig.from_json_file(__lowerCAmelCase )
snake_case__ = OpenAIGPTModel(__lowerCAmelCase )
# Load weights from numpy
load_tf_weights_in_openai_gpt(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
snake_case__ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
snake_case__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , __lowerCAmelCase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
lowerCamelCase__ : Any = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 33 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class _SCREAMING_SNAKE_CASE :
a_ : Any = BlenderbotSmallConfig
a_ : List[str] = {}
a_ : Any = '''gelu'''
def __init__(self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=9_9 , UpperCAmelCase=3_2 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=2_0 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=0 , ):
'''simple docstring'''
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =seq_length
__UpperCAmelCase =is_training
__UpperCAmelCase =use_labels
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =eos_token_id
__UpperCAmelCase =pad_token_id
__UpperCAmelCase =bos_token_id
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
__UpperCAmelCase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
__UpperCAmelCase =tf.concat([input_ids, eos_tensor] , axis=1)
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCAmelCase =prepare_blenderbot_small_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
return config, inputs_dict
def A__ (self , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =TFBlenderbotSmallModel(config=UpperCAmelCase).get_decoder()
__UpperCAmelCase =inputs_dict['''input_ids''']
__UpperCAmelCase =input_ids[:1, :]
__UpperCAmelCase =inputs_dict['''attention_mask'''][:1, :]
__UpperCAmelCase =inputs_dict['''head_mask''']
__UpperCAmelCase =1
# first forward pass
__UpperCAmelCase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , head_mask=UpperCAmelCase , use_cache=UpperCAmelCase)
__UpperCAmelCase , __UpperCAmelCase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase =ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase =tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
# append to next input_ids and
__UpperCAmelCase =tf.concat([input_ids, next_tokens] , axis=-1)
__UpperCAmelCase =tf.concat([attention_mask, next_attn_mask] , axis=-1)
__UpperCAmelCase =model(UpperCAmelCase , attention_mask=UpperCAmelCase)[0]
__UpperCAmelCase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
__UpperCAmelCase =int(ids_tensor((1,) , output_from_past.shape[-1]))
__UpperCAmelCase =output_from_no_past[:, -3:, random_slice_idx]
__UpperCAmelCase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , rtol=1e-3)
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , ) -> List[str]:
if attention_mask is None:
__UpperCAmelCase =tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__UpperCAmelCase =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__UpperCAmelCase =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCAmelCase =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCAmelCase =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a_ : List[Any] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
a_ : Optional[Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
a_ : Tuple = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
a_ : Tuple = True
a_ : List[str] = False
a_ : Union[str, Any] = False
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =TFBlenderbotSmallModelTester(self)
__UpperCAmelCase =ConfigTester(self , config_class=UpperCAmelCase)
def A__ (self):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase)
@require_tokenizers
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
a_ : str = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
a_ : str = '''facebook/blenderbot_small-90M'''
@cached_property
def A__ (self):
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
@cached_property
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
@slow
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =self.tokenizer(self.src_text , return_tensors='''tf''')
__UpperCAmelCase =self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCAmelCase , )
__UpperCAmelCase =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCAmelCase)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 132 | 0 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def SCREAMING_SNAKE_CASE__ ( __a ):
snake_case_ : Tuple = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
snake_case_ : Stack[int] = Stack()
snake_case_ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__a ) )
elif i in operators:
# RULE 2
operator_stack.push(__a )
elif i == ")":
# RULE 4
snake_case_ : Any = operator_stack.peek()
operator_stack.pop()
snake_case_ : List[str] = operand_stack.peek()
operand_stack.pop()
snake_case_ : Tuple = operand_stack.peek()
operand_stack.pop()
snake_case_ : Any = operators[opr](__a , __a )
operand_stack.push(__a )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 711 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ):
__magic_name__: Optional[Any] = BertJapaneseTokenizer
__magic_name__: Optional[int] = False
__magic_name__: Optional[int] = True
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
"""simple docstring"""
super().setUp()
snake_case_ : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
snake_case_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self : int , _A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : List[Any] = 'こんにちは、世界。 \nこんばんは、世界。'
snake_case_ : Optional[int] = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def UpperCAmelCase_ ( self : Any , _A : List[Any] ) -> int:
"""simple docstring"""
snake_case_ ,snake_case_ : Optional[Any] = self.get_input_output_texts(_A )
snake_case_ : Any = tokenizer.encode(_A , add_special_tokens=_A )
snake_case_ : Union[str, Any] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
return text, ids
def UpperCAmelCase_ ( self : List[str] ) -> str:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
snake_case_ : List[str] = self.tokenizer_class(self.vocab_file )
snake_case_ : int = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(_A , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : int = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(_A )
snake_case_ : str = 'こんにちは、世界。\nこんばんは、世界。'
snake_case_ : Optional[Any] = tokenizer.tokenize(_A )
self.assertListEqual(_A , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_A , 'wb' ) as handle:
pickle.dump(_A , _A )
with open(_A , 'rb' ) as handle:
snake_case_ : Any = pickle.load(_A )
snake_case_ : Optional[Any] = tokenizer_new.tokenize(_A )
self.assertListEqual(_A , _A )
def UpperCAmelCase_ ( self : Tuple ) -> int:
"""simple docstring"""
snake_case_ : str = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
try:
snake_case_ : Dict = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
try:
snake_case_ : Any = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = MecabTokenizer(do_lower_case=_A , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
"""simple docstring"""
try:
snake_case_ : Union[str, Any] = MecabTokenizer(
do_lower_case=_A , normalize_text=_A , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = MecabTokenizer(normalize_text=_A , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
snake_case_ : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(_A )
snake_case_ : Optional[Any] = 'こんにちは、世界。\nこんばんは、世界。'
snake_case_ : Optional[int] = tokenizer.tokenize(_A )
self.assertListEqual(_A , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case_ : Optional[int] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_A , 'wb' ) as handle:
pickle.dump(_A , _A )
with open(_A , 'rb' ) as handle:
snake_case_ : Dict = pickle.load(_A )
snake_case_ : Any = tokenizer_new.tokenize(_A )
self.assertListEqual(_A , _A )
@require_sudachi
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : str = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def UpperCAmelCase_ ( self : Tuple ) -> int:
"""simple docstring"""
snake_case_ : List[str] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def UpperCAmelCase_ ( self : Any ) -> int:
"""simple docstring"""
snake_case_ : List[str] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
snake_case_ : Any = SudachiTokenizer(do_lower_case=_A , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def UpperCAmelCase_ ( self : Any ) -> List[str]:
"""simple docstring"""
snake_case_ : Optional[int] = SudachiTokenizer(normalize_text=_A , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
snake_case_ : List[str] = SudachiTokenizer(trim_whitespace=_A , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def UpperCAmelCase_ ( self : List[str] ) -> Any:
"""simple docstring"""
snake_case_ : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(_A )
snake_case_ : Any = 'こんにちは、世界。\nこんばんは、世界。'
snake_case_ : Any = tokenizer.tokenize(_A )
self.assertListEqual(_A , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
snake_case_ : Any = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(_A , 'wb' ) as handle:
pickle.dump(_A , _A )
with open(_A , 'rb' ) as handle:
snake_case_ : Optional[Any] = pickle.load(_A )
snake_case_ : Union[str, Any] = tokenizer_new.tokenize(_A )
self.assertListEqual(_A , _A )
@require_jumanpp
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
"""simple docstring"""
snake_case_ : Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def UpperCAmelCase_ ( self : str ) -> List[str]:
"""simple docstring"""
snake_case_ : Dict = JumanppTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : str = JumanppTokenizer(normalize_text=_A )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Optional[int] = JumanppTokenizer(trim_whitespace=_A )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def UpperCAmelCase_ ( self : Any ) -> Any:
"""simple docstring"""
snake_case_ : List[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def UpperCAmelCase_ ( self : int ) -> Any:
"""simple docstring"""
snake_case_ : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
snake_case_ : List[Any] = {}
for i, token in enumerate(_A ):
snake_case_ : List[str] = i
snake_case_ : List[str] = WordpieceTokenizer(vocab=_A , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Any = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
snake_case_ : Optional[int] = tokenizer.subword_tokenizer
snake_case_ : List[str] = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(_A , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
snake_case_ : Dict = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(_A , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
snake_case_ : Dict = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
snake_case_ : Tuple = tokenizer.encode('ありがとう。' , add_special_tokens=_A )
snake_case_ : Optional[Any] = tokenizer.encode('どういたしまして。' , add_special_tokens=_A )
snake_case_ : Dict = tokenizer.build_inputs_with_special_tokens(_A )
snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ):
__magic_name__: List[Any] = BertJapaneseTokenizer
__magic_name__: int = False
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
super().setUp()
snake_case_ : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self : List[Any] , **_A : Dict ) -> List[str]:
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **_A )
def UpperCAmelCase_ ( self : Dict , _A : str ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = 'こんにちは、世界。 \nこんばんは、世界。'
snake_case_ : str = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase_ ( self : Tuple ) -> str:
"""simple docstring"""
pass # TODO add if relevant
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
snake_case_ : Any = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
snake_case_ : int = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
_A , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Dict = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
snake_case_ : Optional[Any] = {}
for i, token in enumerate(_A ):
snake_case_ : str = i
snake_case_ : Tuple = CharacterTokenizer(vocab=_A , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def UpperCAmelCase_ ( self : List[Any] ) -> str:
"""simple docstring"""
snake_case_ : str = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
snake_case_ : Optional[Any] = tokenizer.encode('ありがとう。' , add_special_tokens=_A )
snake_case_ : List[str] = tokenizer.encode('どういたしまして。' , add_special_tokens=_A )
snake_case_ : List[Any] = tokenizer.build_inputs_with_special_tokens(_A )
snake_case_ : Tuple = tokenizer.build_inputs_with_special_tokens(_A , _A )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Optional[Any] = 'cl-tohoku/bert-base-japanese'
snake_case_ : List[Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : List[Any] = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(_A )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
snake_case_ : int = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(_A )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 534 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] ):
_UpperCAmelCase : str = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
_UpperCAmelCase , _UpperCAmelCase : str = emb.weight.shape
_UpperCAmelCase : Optional[Any] = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ )
_UpperCAmelCase : List[Any] = emb.weight.data
return lin_layer
def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ):
_UpperCAmelCase : Optional[Any] = torch.load(snake_case__ , map_location='''cpu''' )
_UpperCAmelCase : int = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
_UpperCAmelCase : List[str] = mam_aaa['''model''']
remove_ignore_keys_(snake_case__ )
_UpperCAmelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_UpperCAmelCase : Union[str, Any] = MaMaaaConfig(
vocab_size=snake_case__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
_UpperCAmelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight''']
_UpperCAmelCase : Any = MaMaaaForConditionalGeneration(snake_case__ )
model.model.load_state_dict(snake_case__ , strict=snake_case__ )
_UpperCAmelCase : Union[str, Any] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_lowerCAmelCase :Optional[Any] = parser.parse_args()
_lowerCAmelCase :Optional[int] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
| 506 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase (snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : List[str] ) -> str:
'''simple docstring'''
with open(snake_case__ ) as metadata_file:
lowerCAmelCase = json.load(snake_case__ )
lowerCAmelCase = LukeConfig(use_entity_aware_attention=snake_case__ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
lowerCAmelCase = torch.load(snake_case__ , map_location="""cpu""" )["""module"""]
# Load the entity vocab file
lowerCAmelCase = load_original_entity_vocab(snake_case__ )
# add an entry for [MASK2]
lowerCAmelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowerCAmelCase = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
lowerCAmelCase = AddedToken("""<ent>""" , lstrip=snake_case__ , rstrip=snake_case__ )
lowerCAmelCase = AddedToken("""<ent2>""" , lstrip=snake_case__ , rstrip=snake_case__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(snake_case__ )
with open(os.path.join(snake_case__ , """tokenizer_config.json""" ) , """r""" ) as f:
lowerCAmelCase = json.load(snake_case__ )
lowerCAmelCase = """MLukeTokenizer"""
with open(os.path.join(snake_case__ , """tokenizer_config.json""" ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
with open(os.path.join(snake_case__ , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
lowerCAmelCase = MLukeTokenizer.from_pretrained(snake_case__ )
# Initialize the embeddings of the special tokens
lowerCAmelCase = tokenizer.convert_tokens_to_ids(["""@"""] )[0]
lowerCAmelCase = tokenizer.convert_tokens_to_ids(["""#"""] )[0]
lowerCAmelCase = state_dict["""embeddings.word_embeddings.weight"""]
lowerCAmelCase = word_emb[ent_init_index].unsqueeze(0 )
lowerCAmelCase = word_emb[enta_init_index].unsqueeze(0 )
lowerCAmelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowerCAmelCase = state_dict[bias_name]
lowerCAmelCase = decoder_bias[ent_init_index].unsqueeze(0 )
lowerCAmelCase = decoder_bias[enta_init_index].unsqueeze(0 )
lowerCAmelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowerCAmelCase = f'''encoder.layer.{layer_index}.attention.self.'''
lowerCAmelCase = state_dict[prefix + matrix_name]
lowerCAmelCase = state_dict[prefix + matrix_name]
lowerCAmelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowerCAmelCase = state_dict["""entity_embeddings.entity_embeddings.weight"""]
lowerCAmelCase = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 )
lowerCAmelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowerCAmelCase = state_dict["""entity_predictions.bias"""]
lowerCAmelCase = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 )
lowerCAmelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowerCAmelCase = LukeForMaskedLM(config=snake_case__ ).eval()
state_dict.pop("""entity_predictions.decoder.weight""" )
state_dict.pop("""lm_head.decoder.weight""" )
state_dict.pop("""lm_head.decoder.bias""" )
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )):
lowerCAmelCase = state_dict[key]
else:
lowerCAmelCase = state_dict[key]
lowerCAmelCase , lowerCAmelCase = model.load_state_dict(snake_case__ , strict=snake_case__ )
if set(snake_case__ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(snake_case__ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowerCAmelCase = MLukeTokenizer.from_pretrained(snake_case__ , task="""entity_classification""" )
lowerCAmelCase = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
lowerCAmelCase = (0, 9)
lowerCAmelCase = tokenizer(snake_case__ , entity_spans=[span] , return_tensors="""pt""" )
lowerCAmelCase = model(**snake_case__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCAmelCase = torch.Size((1, 33, 768) )
lowerCAmelCase = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCAmelCase = torch.Size((1, 1, 768) )
lowerCAmelCase = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case__ , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
lowerCAmelCase = MLukeTokenizer.from_pretrained(snake_case__ )
lowerCAmelCase = """Tokyo is the capital of <mask>."""
lowerCAmelCase = (24, 30)
lowerCAmelCase = tokenizer(snake_case__ , entity_spans=[span] , return_tensors="""pt""" )
lowerCAmelCase = model(**snake_case__ )
lowerCAmelCase = encoding["""input_ids"""][0].tolist()
lowerCAmelCase = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) )
lowerCAmelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(snake_case__ )
lowerCAmelCase = outputs.entity_logits[0][0].argmax().item()
lowerCAmelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(snake_case__ ) )
model.save_pretrained(snake_case__ )
def lowercase (snake_case__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase = ["""[MASK]""", """[PAD]""", """[UNK]"""]
lowerCAmelCase = [json.loads(snake_case__ ) for line in open(snake_case__ )]
lowerCAmelCase = {}
for entry in data:
lowerCAmelCase = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
lowerCAmelCase = entity_id
break
lowerCAmelCase = f'''{language}:{entity_name}'''
lowerCAmelCase = entity_id
return new_mapping
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
a = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 169 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Union[str, Any] = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[str] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
snake_case_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]:
UpperCAmelCase_ : int = []
if len(SCREAMING_SNAKE_CASE__ ) == 1:
return [nums.copy()]
for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ : List[Any] = nums.pop(0 )
UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ )
for perm in permutations:
perm.append(SCREAMING_SNAKE_CASE__ )
result.extend(SCREAMING_SNAKE_CASE__ )
nums.append(SCREAMING_SNAKE_CASE__ )
return result
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
if start == len(SCREAMING_SNAKE_CASE__ ) - 1:
output.append(nums[:] )
else:
for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start]
backtrack(start + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack
UpperCAmelCase_ : Optional[int] = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
snake_case_ : Tuple = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 644 | 0 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class __a ( _snake_case ):
def __init__( self : List[Any] ,*lowerCamelCase : Tuple ,**lowerCamelCase : List[Any] ):
'''simple docstring'''
super().__init__(*lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = {}
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : List[Any] ,*lowerCamelCase : Any ,**lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super().add_tokens(lowerCamelCase ,*lowerCamelCase ,**lowerCamelCase )
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
""" `placeholder_token` that is not already in the tokenizer.""" )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Union[str, Any] ,*lowerCamelCase : List[str] ,lowerCamelCase : Union[str, Any]=1 ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowerCamelCase ,*lowerCamelCase ,**lowerCamelCase )
output.append(lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = []
for i in range(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = placeholder_token + f"""_{i}"""
self.try_adding_tokens(lowerCamelCase ,*lowerCamelCase ,**lowerCamelCase )
output.append(lowerCamelCase )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""" )
__SCREAMING_SNAKE_CASE = output
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[Any]=False ,lowerCamelCase : int=1.0 ):
'''simple docstring'''
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = []
for i in range(len(lowerCamelCase ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] ,vector_shuffle=lowerCamelCase ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
__SCREAMING_SNAKE_CASE = self.token_map[placeholder_token]
__SCREAMING_SNAKE_CASE = tokens[: 1 + int(len(lowerCamelCase ) * prop_tokens_to_load )]
if vector_shuffle:
__SCREAMING_SNAKE_CASE = copy.copy(lowerCamelCase )
random.shuffle(lowerCamelCase )
__SCREAMING_SNAKE_CASE = text.replace(lowerCamelCase ,""" """.join(lowerCamelCase ) )
return text
def __call__( self : Optional[Any] ,lowerCamelCase : Any ,*lowerCamelCase : List[Any] ,lowerCamelCase : List[str]=False ,lowerCamelCase : Any=1.0 ,**lowerCamelCase : Dict ):
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowerCamelCase ,vector_shuffle=lowerCamelCase ,prop_tokens_to_load=lowerCamelCase ) ,*lowerCamelCase ,**lowerCamelCase ,)
def UpperCAmelCase__ ( self : str ,lowerCamelCase : Dict ,*lowerCamelCase : Optional[int] ,lowerCamelCase : List[Any]=False ,lowerCamelCase : Optional[Any]=1.0 ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
lowerCamelCase ,vector_shuffle=lowerCamelCase ,prop_tokens_to_load=lowerCamelCase ) ,*lowerCamelCase ,**lowerCamelCase ,)
| 109 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ (lowercase__ : int , lowercase__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_ (lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = tmp_path / '''cache'''
lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ ).read()
_check_parquet_dataset(lowercase__ , lowercase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = tmp_path / '''cache'''
lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ = features.copy() if features else default_expected_features
lowerCAmelCase__ = (
Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , features=lowercase__ , cache_dir=lowercase__ ).read()
_check_parquet_dataset(lowercase__ , lowercase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_ (lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : List[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = tmp_path / '''cache'''
lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ , split=lowercase__ ).read()
_check_parquet_dataset(lowercase__ , lowercase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCAmelCase_ (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Any:
'''simple docstring'''
if issubclass(lowercase__ , lowercase__ ):
lowerCAmelCase__ = parquet_path
elif issubclass(lowercase__ , lowercase__ ):
lowerCAmelCase__ = [parquet_path]
lowerCAmelCase__ = tmp_path / '''cache'''
lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ ).read()
_check_parquet_dataset(lowercase__ , lowercase__ )
def lowerCAmelCase_ (lowercase__ : List[str] , lowercase__ : str , lowercase__ : Optional[Any]=("train",) ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ )
for split in splits:
lowerCAmelCase__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_ (lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = tmp_path / '''cache'''
lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase__ = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=lowercase__ , keep_in_memory=lowercase__ ).read()
_check_parquet_datasetdict(lowercase__ , lowercase__ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase_ (lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = tmp_path / '''cache'''
lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ = features.copy() if features else default_expected_features
lowerCAmelCase__ = (
Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase__ = ParquetDatasetReader({'''train''': parquet_path} , features=lowercase__ , cache_dir=lowercase__ ).read()
_check_parquet_datasetdict(lowercase__ , lowercase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_ (lowercase__ : str , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ) -> int:
'''simple docstring'''
if split:
lowerCAmelCase__ = {split: parquet_path}
else:
lowerCAmelCase__ = '''train'''
lowerCAmelCase__ = {'''train''': parquet_path, '''test''': parquet_path}
lowerCAmelCase__ = tmp_path / '''cache'''
lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ ).read()
_check_parquet_datasetdict(lowercase__ , lowercase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = ParquetDatasetWriter(lowercase__ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ = pq.ParquetFile(tmp_path / '''foo.parquet''' )
lowerCAmelCase__ = pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase_ (lowercase__ : Dict , lowercase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = str(shared_datadir / '''test_image_rgb.jpg''' )
lowerCAmelCase__ = {'''image''': [image_path]}
lowerCAmelCase__ = Features({'''image''': Image()} )
lowerCAmelCase__ = Dataset.from_dict(lowercase__ , features=lowercase__ )
lowerCAmelCase__ = ParquetDatasetWriter(lowercase__ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
lowerCAmelCase__ = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
lowerCAmelCase__ = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=lowercase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCAmelCase_ (lowercase__ : Optional[int] , lowercase__ : str ) -> Tuple:
'''simple docstring'''
assert get_writer_batch_size(lowercase__ ) == expected
| 668 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowercase :
def __init__( self : Tuple , a : str , a : Union[str, Any]=1_3 , a : Tuple=3_2 , a : Optional[int]=2 , a : Tuple=3 , a : str=1_6 , a : int=[3_2, 6_4, 1_2_8] , a : Optional[int]=[1, 2, 1] , a : List[str]=[2, 2, 4] , a : Any=2 , a : Union[str, Any]=2.0 , a : int=True , a : Tuple=0.0 , a : Tuple=0.0 , a : Optional[int]=0.1 , a : Union[str, Any]="gelu" , a : Optional[Any]=False , a : List[Any]=True , a : Any=0.0_2 , a : int=1e-5 , a : int=True , a : Dict=None , a : str=True , a : Dict=1_0 , a : List[Any]=8 , a : Union[str, Any]=["stage1", "stage2"] , a : List[Any]=[1, 2] , ):
"""simple docstring"""
__snake_case : Union[str, Any] =parent
__snake_case : int =batch_size
__snake_case : str =image_size
__snake_case : Union[str, Any] =patch_size
__snake_case : Any =num_channels
__snake_case : str =embed_dim
__snake_case : List[Any] =hidden_sizes
__snake_case : List[str] =depths
__snake_case : str =num_heads
__snake_case : Union[str, Any] =window_size
__snake_case : Dict =mlp_ratio
__snake_case : List[Any] =qkv_bias
__snake_case : List[str] =hidden_dropout_prob
__snake_case : int =attention_probs_dropout_prob
__snake_case : Tuple =drop_path_rate
__snake_case : str =hidden_act
__snake_case : Union[str, Any] =use_absolute_embeddings
__snake_case : List[Any] =patch_norm
__snake_case : str =layer_norm_eps
__snake_case : Optional[Any] =initializer_range
__snake_case : Optional[int] =is_training
__snake_case : str =scope
__snake_case : Tuple =use_labels
__snake_case : List[Any] =type_sequence_label_size
__snake_case : Union[str, Any] =encoder_stride
__snake_case : Optional[Any] =out_features
__snake_case : Optional[int] =out_indices
def _UpperCamelCase ( self : str ):
"""simple docstring"""
__snake_case : int =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : Optional[Any] =None
if self.use_labels:
__snake_case : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : int =self.get_config()
return config, pixel_values, labels
def _UpperCamelCase ( self : int ):
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCamelCase ( self : Any , a : str , a : List[Any] , a : Tuple ):
"""simple docstring"""
__snake_case : Tuple =FocalNetModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__snake_case : Dict =model(UpperCamelCase__ )
__snake_case : int =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__snake_case : Optional[int] =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCamelCase ( self : List[Any] , a : Any , a : Union[str, Any] , a : Tuple ):
"""simple docstring"""
__snake_case : Dict =FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__snake_case : Dict =model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__snake_case : Union[str, Any] =None
__snake_case : Optional[Any] =FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__snake_case : Optional[int] =model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _UpperCamelCase ( self : List[str] , a : Tuple , a : str , a : Optional[Any] ):
"""simple docstring"""
__snake_case : Optional[int] =FocalNetForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__snake_case : Any =model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__snake_case : int =1
__snake_case : List[Any] =FocalNetForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__snake_case : List[Any] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : int =model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _UpperCamelCase ( self : Any , a : Optional[int] , a : Any , a : Optional[Any] ):
"""simple docstring"""
__snake_case : str =self.type_sequence_label_size
__snake_case : Optional[Any] =FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__snake_case : Union[str, Any] =model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case : Union[str, Any] =1
__snake_case : Any =FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__snake_case : List[str] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : int =model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
__snake_case : List[Any] =self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : int =config_and_inputs
__snake_case : str ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
_a : Any = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
_a : Optional[int] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
_a : Dict = False
_a : str = False
_a : List[Any] = False
_a : str = False
_a : Any = False
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
__snake_case : str =FocalNetModelTester(self )
__snake_case : Optional[int] =ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=3_7 , has_text_modality=UpperCamelCase__ )
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
__snake_case : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def _UpperCamelCase ( self : str ):
"""simple docstring"""
__snake_case : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , __snake_case : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__snake_case : Union[str, Any] =model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Tuple =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case , __snake_case : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__snake_case : Tuple =model_class(UpperCamelCase__ )
__snake_case : Optional[int] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Any =[*signature.parameters.keys()]
__snake_case : Union[str, Any] =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _UpperCamelCase ( self : Optional[int] , a : Optional[Any] , a : Any , a : List[Any] , a : str ):
"""simple docstring"""
__snake_case : str =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
__snake_case : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
__snake_case : Optional[Any] =outputs.hidden_states
__snake_case : str =getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# FocalNet has a different seq_length
__snake_case : Tuple =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : Union[str, Any] =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__snake_case : Union[str, Any] =outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
__snake_case , __snake_case , __snake_case , __snake_case : Dict =reshaped_hidden_states[0].shape
__snake_case : int =(
reshaped_hidden_states[0].view(UpperCamelCase__ , UpperCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[int] =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__snake_case : List[str] =True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Optional[Any] =True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : List[str] =3
__snake_case : Tuple =(
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__snake_case : int =(
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__snake_case : List[str] =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__snake_case : Union[str, Any] =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__snake_case : Union[str, Any] =True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Optional[int] =True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
@slow
def _UpperCamelCase ( self : str ):
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Dict =FocalNetModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case : Dict =self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Any =_config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
__snake_case : List[Any] =model_class(config=UpperCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _lowercase ( unittest.TestCase ):
@cached_property
def _UpperCamelCase ( self : Dict ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def _UpperCamelCase ( self : int ):
"""simple docstring"""
__snake_case : Tuple =FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(UpperCamelCase__ )
__snake_case : int =self.default_image_processor
__snake_case : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__snake_case : List[Any] =image_processor(images=UpperCamelCase__ , return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
__snake_case : str =model(**UpperCamelCase__ )
# verify the logits
__snake_case : int =torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
__snake_case : Optional[int] =torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_8_1 )
@require_torch
class _lowercase ( lowerCAmelCase , unittest.TestCase ):
_a : Tuple = (FocalNetBackbone,) if is_torch_available() else ()
_a : Tuple = FocalNetConfig
_a : Dict = False
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case : Optional[Any] =FocalNetModelTester(self )
| 700 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
UpperCamelCase_ : int = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
UpperCamelCase_ : Optional[int] = (
subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
UpperCamelCase_ : List[Any] = """|""".join(sys.argv[1:])
UpperCamelCase_ : List[str] = re.compile(rF'''^({joined_dirs}).*?\.py$''')
UpperCamelCase_ : str = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 497 | 0 |
'''simple docstring'''
def snake_case_ ( _lowerCAmelCase : int ) -> str:
if n == 1 or not isinstance(_lowercase , _lowercase ):
return 0
elif n == 2:
return 1
else:
UpperCAmelCase : str = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def snake_case_ ( _lowerCAmelCase : int ) -> Union[str, Any]:
UpperCAmelCase : Tuple = 0
UpperCAmelCase : Optional[int] = 2
while digits < n:
index += 1
UpperCAmelCase : Optional[int] = len(str(fibonacci(_lowercase ) ) )
return index
def snake_case_ ( _lowerCAmelCase : int = 1000 ) -> Tuple:
return fibonacci_digits_index(_lowercase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 127 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def lowercase_ ( _lowercase : Any , _lowercase : float = 0.0 , _lowercase : float = 1.0 ):
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 595 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger(__name__)
def lowerCamelCase ( UpperCamelCase : str ) -> YolosConfig:
_lowerCamelCase = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
_lowerCamelCase = 1_92
_lowerCamelCase = 7_68
_lowerCamelCase = 12
_lowerCamelCase = 3
_lowerCamelCase = [8_00, 13_33]
_lowerCamelCase = False
elif yolos_name == "yolos_s_dWr":
_lowerCamelCase = 3_30
_lowerCamelCase = 14
_lowerCamelCase = 6
_lowerCamelCase = 13_20
elif "yolos_s" in yolos_name:
_lowerCamelCase = 3_84
_lowerCamelCase = 15_36
_lowerCamelCase = 12
_lowerCamelCase = 6
elif "yolos_b" in yolos_name:
_lowerCamelCase = [8_00, 13_44]
_lowerCamelCase = 91
_lowerCamelCase = 'huggingface/label-files'
_lowerCamelCase = 'coco-detection-id2label.json'
_lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='dataset' ) , 'r' ) )
_lowerCamelCase = {int(UpperCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase = idalabel
_lowerCamelCase = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( UpperCamelCase : dict , UpperCamelCase : YolosConfig , UpperCamelCase : bool = False ) -> Optional[int]:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase = in_proj_weight[: config.hidden_size, :]
_lowerCamelCase = in_proj_bias[: config.hidden_size]
_lowerCamelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase = in_proj_weight[-config.hidden_size :, :]
_lowerCamelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase ( UpperCamelCase : str ) -> str:
if "backbone" in name:
_lowerCamelCase = name.replace('backbone' , 'vit' )
if "cls_token" in name:
_lowerCamelCase = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
_lowerCamelCase = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
_lowerCamelCase = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
_lowerCamelCase = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
_lowerCamelCase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
_lowerCamelCase = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
_lowerCamelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
_lowerCamelCase = name.replace('attn' , 'attention.self' )
if "norm1" in name:
_lowerCamelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_lowerCamelCase = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_lowerCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowerCamelCase = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
_lowerCamelCase = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
_lowerCamelCase = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
_lowerCamelCase = name.replace('vit.norm' , 'vit.layernorm' )
return name
def lowerCamelCase ( UpperCamelCase : dict , UpperCamelCase : YolosForObjectDetection ) -> dict:
for key in orig_state_dict.copy().keys():
_lowerCamelCase = orig_state_dict.pop(UpperCamelCase )
if "qkv" in key:
_lowerCamelCase = key.split('.' )
_lowerCamelCase = int(key_split[2] )
_lowerCamelCase = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
_lowerCamelCase = val[:dim, :]
_lowerCamelCase = val[
dim : dim * 2, :
]
_lowerCamelCase = val[-dim:, :]
else:
_lowerCamelCase = val[:dim]
_lowerCamelCase = val[dim : dim * 2]
_lowerCamelCase = val[-dim:]
else:
_lowerCamelCase = val
return orig_state_dict
def lowerCamelCase ( ) -> torch.Tensor:
_lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCamelCase = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : bool = False ) -> Tuple:
_lowerCamelCase = get_yolos_config(UpperCamelCase )
# load original state_dict
_lowerCamelCase = torch.load(UpperCamelCase , map_location='cpu' )['model']
# load 🤗 model
_lowerCamelCase = YolosForObjectDetection(UpperCamelCase )
model.eval()
_lowerCamelCase = convert_state_dict(UpperCamelCase , UpperCamelCase )
model.load_state_dict(UpperCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
_lowerCamelCase = 8_00 if yolos_name != 'yolos_ti' else 5_12
_lowerCamelCase = YolosImageProcessor(format='coco_detection' , size=UpperCamelCase )
_lowerCamelCase = image_processor(images=prepare_img() , return_tensors='pt' )
_lowerCamelCase = model(**UpperCamelCase )
_lowerCamelCase , _lowerCamelCase = outputs.logits, outputs.pred_boxes
_lowerCamelCase , _lowerCamelCase = None, None
if yolos_name == "yolos_ti":
_lowerCamelCase = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
_lowerCamelCase = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
_lowerCamelCase = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
_lowerCamelCase = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
_lowerCamelCase = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
_lowerCamelCase = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
_lowerCamelCase = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
_lowerCamelCase = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
_lowerCamelCase = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
_lowerCamelCase = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , UpperCamelCase , atol=1e-4 )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCamelCase )
if push_to_hub:
_lowerCamelCase = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
_lowerCamelCase = model_mapping[yolos_name]
image_processor.push_to_hub(UpperCamelCase , organization='hustvl' )
model.push_to_hub(UpperCamelCase , organization='hustvl' )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--yolos_name',
default='yolos_s_200_pre',
type=str,
help=(
'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','
' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'
),
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 234 | import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , snake_case__ : str , snake_case__ : int = 1_3 , snake_case__ : int = 6_4 , snake_case__ : int = 2 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : int = 1_2_8 , snake_case__ : Optional[int]=[1_6, 3_2, 6_4, 1_2_8] , snake_case__ : int = 7 , snake_case__ : int = 4 , snake_case__ : int = 3_7 , snake_case__ : str = "gelu" , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : int = 1_0 , snake_case__ : float = 0.02 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : int = 1_2_8 , snake_case__ : List[int] = [2, 2, 2, 2] , snake_case__ : int = 2 , snake_case__ : int = 2 , ) -> Optional[Any]:
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = encoder_stride
_lowerCamelCase = num_attention_outputs
_lowerCamelCase = embed_dim
_lowerCamelCase = embed_dim + 1
_lowerCamelCase = resolution
_lowerCamelCase = depths
_lowerCamelCase = hidden_sizes
_lowerCamelCase = dim
_lowerCamelCase = mlp_expansion_ratio
def _snake_case ( self : Union[str, Any] ) -> List[str]:
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Union[str, Any] ) -> int:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _snake_case ( self : str , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] ) -> Optional[int]:
_lowerCamelCase = TFEfficientFormerModel(config=snake_case__ )
_lowerCamelCase = model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Tuple ) -> Optional[Any]:
_lowerCamelCase = self.type_sequence_label_size
_lowerCamelCase = TFEfficientFormerForImageClassification(snake_case__ )
_lowerCamelCase = model(snake_case__ , labels=snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFEfficientFormerForImageClassification(snake_case__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self : Dict ) -> List[str]:
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def _snake_case ( self : Optional[Any] ) -> Any:
_lowerCamelCase = TFEfficientFormerModelTester(self )
_lowerCamelCase = ConfigTester(
self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def _snake_case ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def _snake_case ( self : str ) -> Union[str, Any]:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def _snake_case ( self : Optional[int] ) -> List[str]:
pass
def _snake_case ( self : Any ) -> List[Any]:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case__ )
def _snake_case ( self : int ) -> int:
def check_hidden_states_output(snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : str ):
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
_lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_lowerCamelCase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_lowerCamelCase = seq_length * self.model_tester.chunk_length
else:
_lowerCamelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_lowerCamelCase = outputs.decoder_hidden_states
self.asseretIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'seq_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'decoder_seq_length' , snake_case__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def _snake_case ( self : Dict , snake_case__ : int , snake_case__ : str , snake_case__ : List[str]=False ) -> List[Any]:
_lowerCamelCase = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _snake_case ( self : Optional[Any] ) -> Dict:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def _snake_case ( self : str ) -> Tuple:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def _snake_case ( self : Optional[Any] ) -> Optional[Any]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def _snake_case ( self : Union[str, Any] ) -> Any:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = TFEfficientFormerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _snake_case ( self : List[Any] ) -> int:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = True
_lowerCamelCase = getattr(self.model_tester , 'seq_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'encoder_seq_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'key_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'chunk_length' , snake_case__ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
_lowerCamelCase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = True
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
_lowerCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCamelCase = True
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
_lowerCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _snake_case ( self : Any ) -> Union[str, Any]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_lowerCamelCase = model_class(snake_case__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_lowerCamelCase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=snake_case__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_lowerCamelCase = model(snake_case__ )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase ( ) -> Optional[int]:
_lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ) -> Tuple:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def _snake_case ( self : List[str] ) -> List[Any]:
_lowerCamelCase = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='tf' )
# forward pass
_lowerCamelCase = model(**snake_case__ , training=snake_case__ )
# verify the logits
_lowerCamelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case__ )
_lowerCamelCase = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
@slow
def _snake_case ( self : List[Any] ) -> Optional[Any]:
_lowerCamelCase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='tf' )
# forward pass
_lowerCamelCase = model(**snake_case__ , training=snake_case__ )
# verify the logits
_lowerCamelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case__ )
_lowerCamelCase = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) ) | 234 | 1 |
__magic_name__: Optional[int] = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 324 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Dict:
"""simple docstring"""
if "." in tensor_name:
A__ = tensor_name.split('''.''' )
for split in splits[:-1]:
A__ = getattr(lowercase_ , lowercase_ )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
A__ = new_module
A__ = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
A__ = tensor_name in module._buffers
A__ = getattr(lowercase_ , lowercase_ )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
A__ = False
A__ = False
if is_buffer or not is_bitsandbytes_available():
A__ = False
A__ = False
else:
A__ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
A__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
A__ = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
A__ = old_value.to(lowercase_ )
elif isinstance(lowercase_ , torch.Tensor ):
A__ = value.to('''cpu''' )
if value.dtype == torch.inta:
A__ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
A__ = torch.tensor(lowercase_ , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , lowercase_ ) and fpaa_statistics is None:
A__ = new_value.T
A__ = old_value.__dict__
if is_abit:
A__ = bnb.nn.IntaParams(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ )
elif is_abit:
A__ = bnb.nn.Paramsabit(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ )
A__ = new_value
if fpaa_statistics is not None:
setattr(module.weight , '''SCB''' , fpaa_statistics.to(lowercase_ ) )
else:
if value is None:
A__ = old_value.to(lowercase_ )
elif isinstance(lowercase_ , torch.Tensor ):
A__ = value.to(lowercase_ )
else:
A__ = torch.tensor(lowercase_ , device=lowercase_ )
if is_buffer:
A__ = new_value
else:
A__ = nn.Parameter(lowercase_ , requires_grad=old_value.requires_grad )
A__ = new_value
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False ) -> Dict:
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
A__ = []
current_key_name.append(lowercase_ )
if (isinstance(lowercase_ , nn.Linear ) or isinstance(lowercase_ , lowercase_ )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(lowercase_ ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(lowercase_ , lowercase_ ):
A__ , A__ = module.weight.shape
else:
A__ = module.in_features
A__ = module.out_features
if quantization_config.quantization_method() == "llm_int8":
A__ = bnb.nn.LinearabitLt(
lowercase_ , lowercase_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
A__ = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
A__ = bnb.nn.Linearabit(
lowercase_ , lowercase_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
A__ = True
# Store the module class in case we need to transpose the weight later
A__ = type(lowercase_ )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(lowercase_ )
if len(list(module.children() ) ) > 0:
A__ , A__ = _replace_with_bnb_linear(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_been_replaced=lowercase_ , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Tuple:
"""simple docstring"""
A__ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
A__ , A__ = _replace_with_bnb_linear(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Dict:
"""simple docstring"""
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , lowercase_ , )
return replace_with_bnb_linear(*lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , lowercase_ , )
return set_module_quantized_tensor_to_device(*lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
"""simple docstring"""
A__ = deepcopy(lowercase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
A__ = find_tied_parameters(lowercase_ )
# For compatibility with Accelerate < 0.18
if isinstance(lowercase_ , lowercase_ ):
A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
A__ = sum(lowercase_ , [] )
A__ = len(lowercase_ ) > 0
# Check if it is a base model
A__ = not hasattr(lowercase_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A__ = list(model.named_children() )
A__ = [list_modules[-1][0]]
# add last module together with tied weights
A__ = set(lowercase_ ) - set(lowercase_ )
A__ = list(set(lowercase_ ) ) + list(lowercase_ )
# remove ".weight" from the keys
A__ = ['''.weight''', '''.bias''']
A__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A__ = name.replace(lowercase_ , '''''' )
filtered_module_names.append(lowercase_ )
return filtered_module_names
| 87 | 0 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class a__:
def __init__( self : List[Any] , __snake_case : Dict=None , __snake_case : Optional[int]=None ):
a : Any = list(poly_a or [0] )[:]
a : Dict = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
a : int = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
a : List[str] = len(self.polyB )
# Add 0 to make lengths equal a power of 2
a : Tuple = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
a : Optional[int] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
a : str = self.__multiply()
def lowercase_ ( self : Optional[Any] , __snake_case : Union[str, Any] ):
a : Tuple = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(__snake_case ) <= 1:
return dft[0]
#
a : List[Any] = self.c_max_length // 2
while next_ncol > 0:
a : str = [[] for i in range(__snake_case )]
a : List[str] = self.root**next_ncol
# First half of next step
a : Union[str, Any] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__snake_case ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
a : Any = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(__snake_case ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
a : Tuple = new_dft
a : List[Any] = next_ncol // 2
return dft[0]
def lowercase_ ( self : int ):
a : List[Any] = self.__dft('A' )
a : Union[str, Any] = self.__dft('B' )
a : Any = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
a : Tuple = 2
while next_ncol <= self.c_max_length:
a : Tuple = [[] for i in range(__snake_case )]
a : Tuple = self.root ** (next_ncol // 2)
a : Optional[int] = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
a : List[Any] = new_inverse_c
next_ncol *= 2
# Unpack
a : List[str] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : Dict ):
a : Optional[Any] = '''A = ''' + ''' + '''.join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
a : List[str] = '''B = ''' + ''' + '''.join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
a : Optional[Any] = '''A*B = ''' + ''' + '''.join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
return F"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 708 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCAmelCase: Union[str, Any] = logging.get_logger(__name__)
class a__( lowerCamelCase__ ):
lowercase__ = ["""pixel_values"""]
def __init__( self : int , __snake_case : bool = True , __snake_case : Optional[Dict[str, int]] = None , __snake_case : PILImageResampling = PILImageResampling.BILINEAR , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_55 , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , **__snake_case : List[str] , ):
super().__init__(**__snake_case )
a : Dict = size if size is not None else {'shortest_edge': 2_56}
a : Optional[int] = get_size_dict(__snake_case , default_to_square=__snake_case )
a : List[str] = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
a : Dict = get_size_dict(__snake_case , param_name='crop_size' )
a : Optional[int] = do_resize
a : Union[str, Any] = size
a : Optional[Any] = resample
a : int = do_center_crop
a : List[str] = crop_size
a : Tuple = do_rescale
a : Any = rescale_factor
a : str = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self : str , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Any , ):
a : str = get_size_dict(__snake_case , default_to_square=__snake_case )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
a : str = get_resize_output_image_size(__snake_case , size=size['shortest_edge'] , default_to_square=__snake_case )
return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Any , ):
a : Any = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__snake_case , size=(size['height'], size['width']) , data_format=__snake_case , **__snake_case )
def lowercase_ ( self : str , __snake_case : np.ndarray , __snake_case : float , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : str ):
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def lowercase_ ( self : Optional[int] , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[str] , ):
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def lowercase_ ( self : Union[str, Any] , __snake_case : ImageInput , __snake_case : Optional[bool] = None , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[float] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__snake_case : Union[str, Any] , ):
a : Dict = do_resize if do_resize is not None else self.do_resize
a : str = size if size is not None else self.size
a : Dict = get_size_dict(__snake_case , default_to_square=__snake_case )
a : Optional[int] = resample if resample is not None else self.resample
a : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : int = crop_size if crop_size is not None else self.crop_size
a : List[str] = get_size_dict(__snake_case , param_name='crop_size' )
a : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
a : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
a : Dict = image_mean if image_mean is not None else self.image_mean
a : Dict = image_std if image_std is not None else self.image_std
a : int = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
a : int = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
a : List[Any] = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=__snake_case , size=__snake_case ) for image in images]
if do_rescale:
a : Union[str, Any] = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
a : List[str] = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
a : str = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
a : Tuple = {'pixel_values': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
def lowercase_ ( self : List[Any] , __snake_case : Tuple , __snake_case : List[Tuple] = None ):
a : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__snake_case ) != len(__snake_case ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(__snake_case ):
a : Optional[int] = target_sizes.numpy()
a : Union[str, Any] = []
for idx in range(len(__snake_case ) ):
a : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=__snake_case )
a : Optional[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__snake_case )
else:
a : List[Any] = logits.argmax(dim=1 )
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 195 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
__lowerCAmelCase : str ={
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = """distilbert"""
__lowercase = {
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
def __init__( self :Tuple , lowercase_ :List[str]=3_05_22 , lowercase_ :Tuple=5_12 , lowercase_ :List[str]=False , lowercase_ :int=6 , lowercase_ :Optional[int]=12 , lowercase_ :Optional[int]=7_68 , lowercase_ :str=4 * 7_68 , lowercase_ :List[Any]=0.1 , lowercase_ :str=0.1 , lowercase_ :Optional[int]="gelu" , lowercase_ :Optional[Any]=0.0_2 , lowercase_ :Optional[Any]=0.1 , lowercase_ :List[Any]=0.2 , lowercase_ :str=0 , **lowercase_ :Union[str, Any] , )-> str:
A__ = vocab_size
A__ = max_position_embeddings
A__ = sinusoidal_pos_embds
A__ = n_layers
A__ = n_heads
A__ = dim
A__ = hidden_dim
A__ = dropout
A__ = attention_dropout
A__ = activation
A__ = initializer_range
A__ = qa_dropout
A__ = seq_classif_dropout
super().__init__(**lowercase_ , pad_token_id=lowercase_ )
class UpperCAmelCase ( UpperCamelCase__ ):
@property
def UpperCAmelCase_ ( self :List[str] )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
A__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 440 |
'''simple docstring'''
from string import ascii_uppercase
__lowerCAmelCase : Union[str, Any] ={str(ord(c) - 55): c for c in ascii_uppercase}
def UpperCamelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
if isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("int() can't convert non-string with explicit base" )
if num < 0:
raise ValueError("parameter must be positive int" )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if base in (0, 1):
raise ValueError("base must be >= 2" )
if base > 36:
raise ValueError("base must be <= 36" )
A__ = ""
A__ = 0
A__ = 0
while div != 1:
A__, A__ = divmod(_lowerCamelCase , _lowerCamelCase )
if base >= 11 and 9 < mod < 36:
A__ = ALPHABET_VALUES[str(_lowerCamelCase )]
else:
A__ = str(_lowerCamelCase )
new_value += actual_value
A__ = num // base
A__ = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(_lowerCamelCase )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 440 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_a : Any = logging.get_logger(__name__)
class a_ ( lowercase_ ):
def __init__( self : List[Any] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : List[str] ):
"""simple docstring"""
warnings.warn(
'''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use DeformableDetrImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 719 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def a_ ( __magic_name__ , __magic_name__ , __magic_name__=8 ) -> str:
"""simple docstring"""
snake_case : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
snake_case : Tuple = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class a_ ( a ):
def __init__( self : Optional[int] , UpperCAmelCase__ : UNetaDConditionModel , UpperCAmelCase__ : DDPMScheduler , UpperCAmelCase__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , movq=UpperCAmelCase__ , )
snake_case : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase( self : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any ):
"""simple docstring"""
if latents is None:
snake_case : int = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
snake_case : Optional[Any] = latents.to(UpperCAmelCase__ )
snake_case : List[Any] = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
snake_case : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
snake_case , snake_case : Optional[int] = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ )
# We'll offload the last model manually.
snake_case : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCAmelCase__ )
def __call__( self : List[str] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
"""simple docstring"""
snake_case : Optional[int] = self._execution_device
snake_case : Union[str, Any] = guidance_scale > 1.0
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Any = torch.cat(UpperCAmelCase__ , dim=0 )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Union[str, Any] = torch.cat(UpperCAmelCase__ , dim=0 )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : int = torch.cat(UpperCAmelCase__ , dim=0 )
snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
snake_case : Dict = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Tuple = hint.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
snake_case : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
snake_case : str = self.scheduler.timesteps
snake_case : Optional[Any] = self.movq.config.latent_channels
snake_case , snake_case : Optional[Any] = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor )
# create initial latent
snake_case : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
snake_case : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : Optional[int] = {'''image_embeds''': image_embeds, '''hint''': hint}
snake_case : Any = self.unet(
sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
if do_classifier_free_guidance:
snake_case , snake_case : Dict = noise_pred.split(latents.shape[1] , dim=1 )
snake_case , snake_case : Any = noise_pred.chunk(2 )
snake_case , snake_case : Dict = variance_pred.chunk(2 )
snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case , snake_case : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case : List[Any] = self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0]
# post-processing
snake_case : List[Any] = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
snake_case : Optional[Any] = image * 0.5 + 0.5
snake_case : int = image.clamp(0 , 1 )
snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case : str = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
| 84 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : List[str] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : List[str] = '''deit'''
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2 , SCREAMING_SNAKE_CASE__ : Dict=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[Any]=1E-12 , SCREAMING_SNAKE_CASE__ : Dict=2_2_4 , SCREAMING_SNAKE_CASE__ : Optional[int]=1_6 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : str=1_6 , **SCREAMING_SNAKE_CASE__ : Optional[int] , ) -> List[str]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
a_ : Optional[int] = hidden_size
a_ : Any = num_hidden_layers
a_ : List[str] = num_attention_heads
a_ : List[str] = intermediate_size
a_ : List[Any] = hidden_act
a_ : Optional[int] = hidden_dropout_prob
a_ : List[str] = attention_probs_dropout_prob
a_ : Optional[Any] = initializer_range
a_ : Tuple = layer_norm_eps
a_ : str = image_size
a_ : Any = patch_size
a_ : Optional[Any] = num_channels
a_ : str = qkv_bias
a_ : Optional[Any] = encoder_stride
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Optional[int] = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> float:
return 1E-4
| 570 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase_ : Union[str, Any] = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 570 | 1 |
class __magic_name__ :
'''simple docstring'''
def __init__( self:int , _a:Tuple ):
snake_case__ = size
snake_case__ = [0] * size
snake_case__ = [0] * size
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:List[str] ):
return index | (index + 1)
@staticmethod
def SCREAMING_SNAKE_CASE__ ( _a:str ):
return (index & (index + 1)) - 1
def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Optional[int] , _a:List[str] ):
snake_case__ = value
while index < self.size:
snake_case__ = self.get_prev(lowercase__ ) + 1
if current_left_border == index:
snake_case__ = value
else:
snake_case__ = max(lowercase__ , lowercase__ , lowercase__ )
snake_case__ = self.get_next(lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self:Tuple , _a:Tuple , _a:Any ):
right -= 1 # Because of right is exclusive
snake_case__ = 0
while left <= right:
snake_case__ = self.get_prev(lowercase__ )
if left <= current_left:
snake_case__ = max(lowercase__ , self.tree[right] )
snake_case__ = current_left
else:
snake_case__ = max(lowercase__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ (snake_case_ ,unittest.TestCase ):
'''simple docstring'''
__lowercase : Optional[Any] = LayoutLMTokenizer
__lowercase : List[str] = LayoutLMTokenizerFast
__lowercase : Dict = True
__lowercase : List[str] = True
def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ):
super().setUp()
snake_case__ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def SCREAMING_SNAKE_CASE__ ( self:List[str] , **_a:Optional[Any] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE__ ( self:Dict , _a:Dict ):
snake_case__ = '''UNwant\u00E9d,running'''
snake_case__ = '''unwanted, running'''
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self:List[Any] ):
snake_case__ = self.tokenizer_class(self.vocab_file )
snake_case__ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 10, 8, 9] )
def SCREAMING_SNAKE_CASE__ ( self:Dict ):
pass
| 208 | 0 |
from __future__ import annotations
from functools import lru_cache
from math import ceil
_snake_case = 100
_snake_case = set(range(3, NUM_PRIMES, 2))
primes.add(2)
_snake_case = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def _A ( __magic_name__ ):
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
lowercase__ = set()
lowercase__ = 42
lowercase__ = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def _A ( __magic_name__ = 5000 ):
for number_to_partition in range(1 , __magic_name__ ):
if len(partition(__magic_name__ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
| 655 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655 | 1 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class lowercase ( A_ ):
lowerCamelCase_ =''''''
lowerCamelCase_ =(
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowerCamelCase_ =None # compression type in fsspec. ex: "gzip"
lowerCamelCase_ =None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : Dict , __lowerCAmelCase : Any = "" , __lowerCAmelCase : Dict = None , __lowerCAmelCase : Union[str, Any] = None , **__lowerCAmelCase : Tuple) -> Tuple:
super().__init__(self , **__lowerCAmelCase)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowercase_ = fsspec.open(
__lowerCAmelCase , mode="rb" , protocol=__lowerCAmelCase , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {}), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowercase_ = os.path.basename(self.file.path.split("::")[0])
lowercase_ = (
self.compressed_name[: self.compressed_name.rindex(".")]
if """.""" in self.compressed_name
else self.compressed_name
)
lowercase_ = None
@classmethod
def __UpperCAmelCase ( cls : int , __lowerCAmelCase : Optional[int]) -> str:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(__lowerCAmelCase).lstrip("/")
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
if self.dir_cache is None:
lowercase_ = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name}
lowercase_ = {f["""name"""]: f}
def __UpperCAmelCase ( self : str , __lowerCAmelCase : Tuple) -> int:
return self.file.open().read()
def __UpperCAmelCase ( self : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any = "rb" , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=None , **__lowerCAmelCase : List[Any] , ) -> Optional[int]:
lowercase_ = self._strip_protocol(__lowerCAmelCase)
if mode != "rb":
raise ValueError(F'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'')
return self.file.open()
class lowercase ( A_ ):
lowerCamelCase_ ='''bz2'''
lowerCamelCase_ ='''bz2'''
lowerCamelCase_ ='''.bz2'''
class lowercase ( A_ ):
lowerCamelCase_ ='''gzip'''
lowerCamelCase_ ='''gzip'''
lowerCamelCase_ ='''.gz'''
class lowercase ( A_ ):
lowerCamelCase_ ='''lz4'''
lowerCamelCase_ ='''lz4'''
lowerCamelCase_ ='''.lz4'''
class lowercase ( A_ ):
lowerCamelCase_ ='''xz'''
lowerCamelCase_ ='''xz'''
lowerCamelCase_ ='''.xz'''
class lowercase ( A_ ):
lowerCamelCase_ ='''zstd'''
lowerCamelCase_ ='''zstd'''
lowerCamelCase_ ='''.zst'''
def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int = "rb" , __lowerCAmelCase : str = None , __lowerCAmelCase : Dict = None , __lowerCAmelCase : Dict = DEFAULT_BLOCK_SIZE , **__lowerCAmelCase : List[str] , ) -> Optional[int]:
super().__init__(
fo=__lowerCAmelCase , mode=__lowerCAmelCase , target_protocol=__lowerCAmelCase , target_options=__lowerCAmelCase , block_size=__lowerCAmelCase , **__lowerCAmelCase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowercase_ = self.file.__enter__
class lowercase :
def __init__( self : Any , __lowerCAmelCase : Optional[Any]) -> Optional[int]:
lowercase_ = file_
def __enter__( self : List[str]) -> List[Any]:
self._file.__enter__()
return self
def __exit__( self : str , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : List[str]) -> Any:
self._file.__exit__(*__lowerCAmelCase , **__lowerCAmelCase)
def __iter__( self : Tuple) -> Union[str, Any]:
return iter(self._file)
def __UpperCAmelCase ( self : Union[str, Any]) -> List[str]:
return next(self._file)
def __getattr__( self : List[Any] , __lowerCAmelCase : List[str]) -> str:
return getattr(self._file , __lowerCAmelCase)
def fixed_enter(*__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : int):
return WrappedFile(_enter(*__lowerCAmelCase , **__lowerCAmelCase))
lowercase_ = fixed_enter
| 708 | '''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowercase ( __lowerCamelCase ):
def __init__( self : str , __lowerCAmelCase : Callable , __lowerCAmelCase : Optional[Features] = None , __lowerCAmelCase : str = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : Optional[dict] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : Union[str, Any] , ) -> List[Any]:
super().__init__(
features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , num_proc=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase_ = Generator(
cache_dir=__lowerCAmelCase , features=__lowerCAmelCase , generator=__lowerCAmelCase , gen_kwargs=__lowerCAmelCase , **__lowerCAmelCase , )
def __UpperCAmelCase ( self : Optional[int]) -> List[str]:
# Build iterable dataset
if self.streaming:
lowercase_ = self.builder.as_streaming_dataset(split="train")
# Build regular (map-style) dataset
else:
lowercase_ = None
lowercase_ = None
lowercase_ = None
lowercase_ = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase , download_mode=__lowerCAmelCase , verification_mode=__lowerCAmelCase , base_path=__lowerCAmelCase , num_proc=self.num_proc , )
lowercase_ = self.builder.as_dataset(
split="train" , verification_mode=__lowerCAmelCase , in_memory=self.keep_in_memory)
return dataset
| 461 | 0 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A , A ) -> Union[str, Any]:
lowerCAmelCase__ = os.path.abspath(A )
logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
lowerCAmelCase__ = tf.train.list_variables(A )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowerCAmelCase__ = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(F"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
lowerCAmelCase__ = name[1:]
# figure out how many levels deep the name is
lowerCAmelCase__ = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(A )
# read data
lowerCAmelCase__ = tf.train.load_variable(A , A )
names.append('''/'''.join(A ) )
arrays.append(A )
logger.info(F"""Read a total of {len(A ):,} layers""" )
# Sanity check
if len(set(A ) ) != 1:
raise ValueError(F"""Found layer names with different depths (layer depth {list(set(A ) )})""" )
lowerCAmelCase__ = list(set(A ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(A , A ):
lowerCAmelCase__ = full_name.split('''/''' )
lowerCAmelCase__ = model
lowerCAmelCase__ = []
for i, m_name in enumerate(A ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
lowerCAmelCase__ = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
lowerCAmelCase__ = getattr(A , '''embeddings''' )
lowerCAmelCase__ = getattr(A , '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
lowerCAmelCase__ = getattr(A , '''encoder''' )
lowerCAmelCase__ = getattr(A , '''layer''' )
lowerCAmelCase__ = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''pooler''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
lowerCAmelCase__ = getattr(A , '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
lowerCAmelCase__ = getattr(A , '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
lowerCAmelCase__ = getattr(A , '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
lowerCAmelCase__ = getattr(A , '''token_type_embeddings''' )
else:
raise ValueError(F"""Unknown embedding layer with name {full_name}""" )
trace.append('''weight''' )
lowerCAmelCase__ = getattr(A , '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
lowerCAmelCase__ = getattr(A , '''attention''' )
lowerCAmelCase__ = getattr(A , '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
lowerCAmelCase__ = getattr(A , '''attention''' )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''attention''' )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
lowerCAmelCase__ = getattr(A , '''output''' )
lowerCAmelCase__ = getattr(A , '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
lowerCAmelCase__ = getattr(A , '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
lowerCAmelCase__ = getattr(A , '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
lowerCAmelCase__ = getattr(A , '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
lowerCAmelCase__ = getattr(A , '''intermediate''' )
lowerCAmelCase__ = getattr(A , '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
lowerCAmelCase__ = getattr(A , '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
lowerCAmelCase__ = getattr(A , '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
lowerCAmelCase__ = getattr(A , '''weight''' )
else:
logger.warning(F"""Ignored {m_name}""" )
# for certain layers reshape is necessary
lowerCAmelCase__ = '''.'''.join(A )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''' , A ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''' , A ):
lowerCAmelCase__ = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowerCAmelCase__ = array.transpose()
if pointer.shape == array.shape:
lowerCAmelCase__ = torch.from_numpy(A )
else:
raise ValueError(
F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
F""" {array.shape}""" )
logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def _snake_case ( A , A , A ) -> Any:
# Instantiate model
logger.info(F"""Loading model based on config from {config_path}...""" )
lowerCAmelCase__ = BertConfig.from_json_file(A )
lowerCAmelCase__ = BertModel(A )
# Load weights from checkpoint
logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(A , A , A )
# Save pytorch-model
logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
__UpperCAmelCase = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 90 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_lowerCamelCase : List[Any] = logging.getLogger(__name__)
class snake_case__ ( __snake_case ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase_ : Tuple=-1 ) -> List[Any]:
# in NER datasets, the last column is usually reserved for NER label
UpperCAmelCase_ = label_idx
def UpperCamelCase ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[Split, str] ) -> List[InputExample]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ = mode.value
UpperCAmelCase_ = os.path.join(lowerCAmelCase_ , F'''{mode}.txt''' )
UpperCAmelCase_ = 1
UpperCAmelCase_ = []
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as f:
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=lowerCAmelCase_ , labels=lowerCAmelCase_ ) )
guid_index += 1
UpperCAmelCase_ = []
UpperCAmelCase_ = []
else:
UpperCAmelCase_ = line.split(''' ''' )
words.append(splits[0] )
if len(lowerCAmelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=lowerCAmelCase_ , labels=lowerCAmelCase_ ) )
return examples
def UpperCamelCase ( self : Tuple , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : List ) -> str:
UpperCAmelCase_ = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(lowerCAmelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
UpperCAmelCase_ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(lowerCAmelCase_ )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase_ : str ) -> List[str]:
if path:
with open(lowerCAmelCase_ , '''r''' ) as f:
UpperCAmelCase_ = f.read().splitlines()
if "O" not in labels:
UpperCAmelCase_ = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class snake_case__ ( __snake_case ):
'''simple docstring'''
def __init__( self : str ) -> Dict:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase_ : str ) -> List[str]:
if path:
with open(lowerCAmelCase_ , '''r''' ) as f:
UpperCAmelCase_ = f.read().splitlines()
if "O" not in labels:
UpperCAmelCase_ = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class snake_case__ ( __snake_case ):
'''simple docstring'''
def UpperCamelCase ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[Split, str] ) -> List[InputExample]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase_ = mode.value
UpperCAmelCase_ = os.path.join(lowerCAmelCase_ , F'''{mode}.txt''' )
UpperCAmelCase_ = 1
UpperCAmelCase_ = []
with open(lowerCAmelCase_ , encoding='''utf-8''' ) as f:
for sentence in parse_incr(lowerCAmelCase_ ):
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
if words:
examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=lowerCAmelCase_ , labels=lowerCAmelCase_ ) )
guid_index += 1
return examples
def UpperCamelCase ( self : int , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : TextIO , lowerCAmelCase_ : List ) -> List[str]:
UpperCAmelCase_ = 0
for sentence in parse_incr(lowerCAmelCase_ ):
UpperCAmelCase_ = preds_list[example_id]
UpperCAmelCase_ = ''''''
for token in sentence:
out += F'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCAmelCase_ )
example_id += 1
def UpperCamelCase ( self : Dict , lowerCAmelCase_ : str ) -> List[str]:
if path:
with open(lowerCAmelCase_ , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 121 | 0 |
import math
import os
import sys
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ''
try:
with open(_SCREAMING_SNAKE_CASE , 'rb' ) as binary_file:
SCREAMING_SNAKE_CASE_ = binary_file.read()
for dat in data:
SCREAMING_SNAKE_CASE_ = f"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : dict[str, str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
lexicon.pop(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = last_match_id
if math.loga(_SCREAMING_SNAKE_CASE ).is_integer():
for curr_key in lexicon:
SCREAMING_SNAKE_CASE_ = '0' + lexicon[curr_key]
SCREAMING_SNAKE_CASE_ = bin(_SCREAMING_SNAKE_CASE )[2:]
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {'0': '0', '1': '1'}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = '', ''
SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
SCREAMING_SNAKE_CASE_ = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
index += 1
SCREAMING_SNAKE_CASE_ = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
SCREAMING_SNAKE_CASE_ = lexicon[curr_string]
result += last_match_id
return result
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = os.path.getsize(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = bin(_SCREAMING_SNAKE_CASE )[2:]
SCREAMING_SNAKE_CASE_ = len(_SCREAMING_SNAKE_CASE )
return "0" * (length_length - 1) + file_length_binary + compressed
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 8
try:
with open(_SCREAMING_SNAKE_CASE , 'wb' ) as opened_file:
SCREAMING_SNAKE_CASE_ = [
to_write[i : i + byte_length]
for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(_SCREAMING_SNAKE_CASE , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = read_file_binary(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = compress_data(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = add_file_length(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
write_file_binary(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 620 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __snake_case ( lowerCAmelCase__ ):
def __init__( self , _A , _A , _A , _A = None , ):
super().__init__()
self.register_modules(transformer=_A , vae=_A , scheduler=_A)
# create a imagenet -> id dictionary for easier use
SCREAMING_SNAKE_CASE_ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(','):
SCREAMING_SNAKE_CASE_ = int(_A)
SCREAMING_SNAKE_CASE_ = dict(sorted(self.labels.items()))
def lowerCAmelCase__ ( self , _A):
if not isinstance(_A , _A):
SCREAMING_SNAKE_CASE_ = list(_A)
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""")
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , _A , _A = 4.0 , _A = None , _A = 50 , _A = "pil" , _A = True , ):
SCREAMING_SNAKE_CASE_ = len(_A)
SCREAMING_SNAKE_CASE_ = self.transformer.config.sample_size
SCREAMING_SNAKE_CASE_ = self.transformer.config.in_channels
SCREAMING_SNAKE_CASE_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_A , device=self.device , dtype=self.transformer.dtype , )
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2) if guidance_scale > 1 else latents
SCREAMING_SNAKE_CASE_ = torch.tensor(_A , device=self.device).reshape(-1)
SCREAMING_SNAKE_CASE_ = torch.tensor([1000] * batch_size , device=self.device)
SCREAMING_SNAKE_CASE_ = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_A)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ = latent_model_input[: len(_A) // 2]
SCREAMING_SNAKE_CASE_ = torch.cat([half, half] , dim=0)
SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(_A , _A)
SCREAMING_SNAKE_CASE_ = t
if not torch.is_tensor(_A):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
SCREAMING_SNAKE_CASE_ = latent_model_input.device.type == 'mps'
if isinstance(_A , _A):
SCREAMING_SNAKE_CASE_ = torch.floataa if is_mps else torch.floataa
else:
SCREAMING_SNAKE_CASE_ = torch.intaa if is_mps else torch.intaa
SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=_A , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
SCREAMING_SNAKE_CASE_ = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE_ = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
SCREAMING_SNAKE_CASE_ = self.transformer(
_A , timestep=_A , class_labels=_A).sample
# perform guidance
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , len(_A) // 2 , dim=0)
SCREAMING_SNAKE_CASE_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
SCREAMING_SNAKE_CASE_ = torch.cat([half_eps, half_eps] , dim=0)
SCREAMING_SNAKE_CASE_ = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = torch.split(_A , _A , dim=1)
else:
SCREAMING_SNAKE_CASE_ = noise_pred
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(_A , _A , _A).prev_sample
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = latent_model_input.chunk(2 , dim=0)
else:
SCREAMING_SNAKE_CASE_ = latent_model_input
SCREAMING_SNAKE_CASE_ = 1 / self.vae.config.scaling_factor * latents
SCREAMING_SNAKE_CASE_ = self.vae.decode(_A).sample
SCREAMING_SNAKE_CASE_ = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE_ = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(_A)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_A)
| 620 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
UpperCAmelCase = parser.parse_args()
if args.model_type == "roberta":
UpperCAmelCase = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCAmelCase = """roberta"""
elif args.model_type == "gpt2":
UpperCAmelCase = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCAmelCase = """transformer"""
UpperCAmelCase = model.state_dict()
UpperCAmelCase = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCAmelCase = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCAmelCase = f'''{prefix}.embeddings.{w}.weight'''
UpperCAmelCase = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCAmelCase = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCAmelCase = state_dict[param_name]
# Transformer Blocks #
UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCAmelCase = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCAmelCase = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[f'''lm_head.dense.{w}''']
UpperCAmelCase = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCAmelCase = state_dict[f'''{prefix}.ln_f.{w}''']
UpperCAmelCase = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 88 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
UpperCAmelCase = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
UpperCAmelCase = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
UpperCAmelCase = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
UpperCAmelCase = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCamelCase_ ( self) -> str:
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""")),
"""references""": datasets.Value("""string"""),
}) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 10, 100] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0) -> Union[str, Any]:
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1":
raise ValueError(_WARNING)
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""")
with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE) as executor:
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[int] = Counter()
_lowerCamelCase : Any = 0
_lowerCamelCase : List[Any] = defaultdict(SCREAMING_SNAKE_CASE)
for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)):
for candidate in candidates:
_lowerCamelCase : Any = candidate + """\n""" + test_case
_lowerCamelCase : Union[str, Any] = (test_program, timeout, task_id, completion_id[task_id])
_lowerCamelCase : List[str] = executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE)
futures.append(SCREAMING_SNAKE_CASE)
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(SCREAMING_SNAKE_CASE):
_lowerCamelCase : int = future.result()
results[result["task_id"]].append((result["""completion_id"""], result))
_lowerCamelCase , _lowerCamelCase : List[Any] = [], []
for result in results.values():
result.sort()
_lowerCamelCase : List[str] = [r[1]["""passed"""] for r in result]
total.append(len(SCREAMING_SNAKE_CASE))
correct.append(sum(SCREAMING_SNAKE_CASE))
_lowerCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = np.array(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = k
_lowerCamelCase : Optional[Any] = {F'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _snake_case ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[str] ):
"""simple docstring"""
def estimator(__snake_case : int , __snake_case : int , __snake_case : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(__snake_case , __snake_case ):
_lowerCamelCase : Optional[int] = itertools.repeat(__snake_case , len(__snake_case ) )
else:
assert len(__snake_case ) == len(__snake_case )
_lowerCamelCase : List[str] = iter(__snake_case )
return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
| 88 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
a : Union[str, Any] = None
a : Optional[Any] = logging.get_logger(__name__)
a : List[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
a : Optional[Any] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
a : Dict = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
a : Any = '''▁'''
class a_ ( _UpperCAmelCase ):
a : List[str] = VOCAB_FILES_NAMES
a : Tuple = PRETRAINED_VOCAB_FILES_MAP
a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Any = AlbertTokenizer
def __init__( self : int , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : int=None , __UpperCamelCase : Any=True , __UpperCamelCase : int=True , __UpperCamelCase : Dict=False , __UpperCamelCase : Union[str, Any]="[CLS]" , __UpperCamelCase : List[Any]="[SEP]" , __UpperCamelCase : Dict="<unk>" , __UpperCamelCase : Dict="[SEP]" , __UpperCamelCase : Optional[int]="<pad>" , __UpperCamelCase : Union[str, Any]="[CLS]" , __UpperCamelCase : Optional[int]="[MASK]" , **__UpperCamelCase : Optional[Any] , ) ->int:
'''simple docstring'''
_UpperCAmelCase = (
AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase , normalized=__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase )
else mask_token
)
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _snake_case ( self : str , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
return (out_vocab_file,) | 708 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : int=32 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=32 , __UpperCamelCase : Any=4 , __UpperCamelCase : Optional[int]=[0, 1, 2, 3] , __UpperCamelCase : str=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : int=[1, 3_84, 24, 24] , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=None , ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = backbone_out_indices
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = backbone_featmap_shape
_UpperCAmelCase = scope
_UpperCAmelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _snake_case ( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _snake_case ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a : int = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : str = False
a : List[str] = False
a : Dict = False
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = DPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self : Optional[int] ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
pass
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def _snake_case ( self : str ) ->Any:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ):
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = False
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
_UpperCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_UpperCAmelCase = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
pass
@slow
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = """add"""
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
def _snake_case ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
_UpperCAmelCase = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
_UpperCAmelCase = outputs.predicted_depth
# verify the predicted depth
_UpperCAmelCase = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __UpperCamelCase , atol=1e-4 ) ) | 19 | 0 |
"""simple docstring"""
import os
import sys
__A = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
__A = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def UpperCamelCase__ ( *lowercase__ : Optional[int] , **lowercase__ : Union[str, Any] ):
return AutoConfig.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def UpperCamelCase__ ( *lowercase__ : str , **lowercase__ : List[Any] ):
return AutoTokenizer.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoModel.__doc__ )
def UpperCamelCase__ ( *lowercase__ : Union[str, Any] , **lowercase__ : Tuple ):
return AutoModel.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def UpperCamelCase__ ( *lowercase__ : Dict , **lowercase__ : Dict ):
return AutoModelForCausalLM.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def UpperCamelCase__ ( *lowercase__ : List[Any] , **lowercase__ : Any ):
return AutoModelForMaskedLM.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def UpperCamelCase__ ( *lowercase__ : Tuple , **lowercase__ : str ):
return AutoModelForSequenceClassification.from_pretrained(*lowercase__ , **lowercase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def UpperCamelCase__ ( *lowercase__ : List[str] , **lowercase__ : Union[str, Any] ):
return AutoModelForQuestionAnswering.from_pretrained(*lowercase__ , **lowercase__ )
| 134 |
"""simple docstring"""
import numpy as np
def UpperCamelCase__ ( lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : Dict , lowercase__ : List[str] ):
snake_case : Optional[int] = int(np.ceil((x_end - xa) / h ) )
snake_case : int = np.zeros((n + 1,) )
snake_case : Optional[Any] = ya
snake_case : List[Any] = xa
for k in range(lowercase__ ):
snake_case : Tuple = f(lowercase__ , y[k] )
snake_case : Optional[int] = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
snake_case : int = f(x + 0.5 * h , y[k] + 0.5 * h * ka )
snake_case : Dict = f(x + h , y[k] + h * ka )
snake_case : str = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
x += h
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 134 | 1 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def A_( A : str , A : Tuple , A : Tuple):
return [
int(1000 * (box[0] / width)),
int(1000 * (box[1] / height)),
int(1000 * (box[2] / width)),
int(1000 * (box[3] / height)),
]
def A_( A : np.ndarray , A : Optional[str] , A : Optional[str]):
UpperCamelCase = to_pil_image(A)
UpperCamelCase , UpperCamelCase = pil_image.size
UpperCamelCase = pytesseract.image_to_data(A , lang=A , output_type='dict' , config=A)
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
UpperCamelCase = [idx for idx, word in enumerate(A) if not word.strip()]
UpperCamelCase = [word for idx, word in enumerate(A) if idx not in irrelevant_indices]
UpperCamelCase = [coord for idx, coord in enumerate(A) if idx not in irrelevant_indices]
UpperCamelCase = [coord for idx, coord in enumerate(A) if idx not in irrelevant_indices]
UpperCamelCase = [coord for idx, coord in enumerate(A) if idx not in irrelevant_indices]
UpperCamelCase = [coord for idx, coord in enumerate(A) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
UpperCamelCase = []
for x, y, w, h in zip(A , A , A , A):
UpperCamelCase = [x, y, x + w, y + h]
actual_boxes.append(A)
# finally, normalize the bounding boxes
UpperCamelCase = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(A , A , A))
assert len(A) == len(A), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = ["""pixel_values"""]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BILINEAR , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , A_ = True , A_ = None , A_ = "" , **A_ , )-> None:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = size if size is not None else {'height': 224, 'width': 224}
UpperCamelCase = get_size_dict(A_ )
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = resample
UpperCamelCase = do_rescale
UpperCamelCase = rescale_value
UpperCamelCase = do_normalize
UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
UpperCamelCase = apply_ocr
UpperCamelCase = ocr_lang
UpperCamelCase = tesseract_config
def UpperCAmelCase_ ( self , A_ , A_ , A_ = PILImageResampling.BILINEAR , A_ = None , **A_ , )-> np.ndarray:
'''simple docstring'''
UpperCamelCase = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
UpperCamelCase = (size['height'], size['width'])
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ = None , **A_ , )-> np.ndarray:
'''simple docstring'''
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ = None , **A_ , )-> np.ndarray:
'''simple docstring'''
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_=None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , )-> PIL.Image.Image:
'''simple docstring'''
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(A_ )
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
UpperCamelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
UpperCamelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
UpperCamelCase = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(A_ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , 'pytesseract' )
UpperCamelCase = []
UpperCamelCase = []
for image in images:
UpperCamelCase , UpperCamelCase = apply_tesseract(A_ , A_ , A_ )
words_batch.append(A_ )
boxes_batch.append(A_ )
if do_resize:
UpperCamelCase = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
UpperCamelCase = [to_channel_dimension_format(A_ , A_ ) for image in images]
UpperCamelCase = BatchFeature(data={'pixel_values': images} , tensor_type=A_ )
if apply_ocr:
UpperCamelCase = words_batch
UpperCamelCase = boxes_batch
return data
| 432 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : str = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 432 | 1 |
def UpperCamelCase ( __magic_name__ : int ) -> int:
"""simple docstring"""
assert isinstance(__magic_name__ , __magic_name__ ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
lowercase__ = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(__magic_name__ )
else:
lowercase__ = sylvester(number - 1 )
lowercase__ = num - 1
lowercase__ = num
return lower * upper + 1
if __name__ == "__main__":
print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
| 15 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = ["""pixel_values"""]
def __init__( self : Any , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : int = 0.9 , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : str , ) -> None:
super().__init__(**UpperCAmelCase )
lowerCamelCase__ : int = size if size is not None else {'shortest_edge': 224}
lowerCamelCase__ : Tuple = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCamelCase__ : Any = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase__ : str = get_size_dict(UpperCAmelCase , param_name='crop_size' )
lowerCamelCase__ : Tuple = do_resize
lowerCamelCase__ : str = size
lowerCamelCase__ : List[str] = crop_pct
lowerCamelCase__ : Any = resample
lowerCamelCase__ : Tuple = do_center_crop
lowerCamelCase__ : Any = crop_size
lowerCamelCase__ : Optional[int] = do_rescale
lowerCamelCase__ : Optional[Any] = rescale_factor
lowerCamelCase__ : Union[str, Any] = do_normalize
lowerCamelCase__ : Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase__ : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A_ ( self : Optional[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[float] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ) -> np.ndarray:
lowerCamelCase__ : Any = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
lowerCamelCase__ : List[Any] = int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowerCamelCase__ : int = int(size['height'] / crop_pct )
else:
lowerCamelCase__ : Any = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(UpperCAmelCase ) )
lowerCamelCase__ : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase , size=UpperCAmelCase , default_to_square=UpperCAmelCase )
else:
if "shortest_edge" in size:
lowerCamelCase__ : int = get_resize_output_image_size(UpperCAmelCase , size=size['shortest_edge'] , default_to_square=UpperCAmelCase )
elif "height" in size and "width" in size:
lowerCamelCase__ : List[Any] = (size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(UpperCAmelCase ) )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Optional[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : str , ) -> np.ndarray:
lowerCamelCase__ : Union[str, Any] = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(UpperCAmelCase , size=(size['height'], size['width']) , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[Any] , ) -> int:
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Dict , ) -> np.ndarray:
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : str , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : int = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Any , ) -> PIL.Image.Image:
lowerCamelCase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : Any = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase__ : Any = resample if resample is not None else self.resample
lowerCamelCase__ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : Any = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : str = image_std if image_std is not None else self.image_std
lowerCamelCase__ : Optional[Any] = size if size is not None else self.size
lowerCamelCase__ : Optional[Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCamelCase__ : List[str] = crop_size if crop_size is not None else self.crop_size
lowerCamelCase__ : int = get_size_dict(UpperCAmelCase , param_name='crop_size' )
lowerCamelCase__ : Any = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase__ : Dict = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
lowerCamelCase__ : Any = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , crop_pct=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase__ : Optional[Any] = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase__ : int = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase__ : List[Any] = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowerCamelCase__ : Dict = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowerCamelCase__ : Dict = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
| 295 | 0 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__(self : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Tuple=[10, 20, 30, 40] , UpperCAmelCase_ : Dict=[2, 2, 3, 2] , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Any=37 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : int=10 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : str=["stage2", "stage3", "stage4"] , UpperCAmelCase_ : List[str]=[2, 3, 4] , UpperCAmelCase_ : List[str]=None , ) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: int =parent
lowerCamelCase__: int =batch_size
lowerCamelCase__: List[Any] =image_size
lowerCamelCase__: List[str] =num_channels
lowerCamelCase__: Tuple =num_stages
lowerCamelCase__: Union[str, Any] =hidden_sizes
lowerCamelCase__: List[Any] =depths
lowerCamelCase__: Tuple =is_training
lowerCamelCase__: List[str] =use_labels
lowerCamelCase__: Tuple =intermediate_size
lowerCamelCase__: List[str] =hidden_act
lowerCamelCase__: Optional[Any] =num_labels
lowerCamelCase__: Tuple =initializer_range
lowerCamelCase__: Tuple =out_features
lowerCamelCase__: Tuple =out_indices
lowerCamelCase__: Dict =scope
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[int] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCamelCase__: Any =None
if self.use_labels:
lowerCamelCase__: Dict =ids_tensor([self.batch_size] , self.num_labels)
lowerCamelCase__: Optional[Any] =self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Dict:
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict) ->str:
'''simple docstring'''
lowerCamelCase__: int =ConvNextVaModel(config=a_)
model.to(a_)
model.eval()
lowerCamelCase__: Any =model(a_)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any]) ->str:
'''simple docstring'''
lowerCamelCase__: Optional[int] =ConvNextVaForImageClassification(a_)
model.to(a_)
model.eval()
lowerCamelCase__: Optional[int] =model(a_ , labels=a_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple) ->int:
'''simple docstring'''
lowerCamelCase__: List[str] =ConvNextVaBackbone(config=a_)
model.to(a_)
model.eval()
lowerCamelCase__: int =model(a_)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
lowerCamelCase__: Tuple =None
lowerCamelCase__: Tuple =ConvNextVaBackbone(config=a_)
model.to(a_)
model.eval()
lowerCamelCase__: List[Any] =model(a_)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.prepare_config_and_inputs()
lowerCamelCase__: Any =config_and_inputs
lowerCamelCase__: str ={"""pixel_values""": pixel_values}
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.prepare_config_and_inputs()
lowerCamelCase__: List[str] =config_and_inputs
lowerCamelCase__: Any ={"""pixel_values""": pixel_values, """labels""": labels}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __a , __a , unittest.TestCase ):
'''simple docstring'''
lowercase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =ConvNextVaModelTester(self)
lowerCamelCase__: int =ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]:
'''simple docstring'''
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
def SCREAMING_SNAKE_CASE_ (self : Any) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
def SCREAMING_SNAKE_CASE_ (self : str) ->List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCamelCase__: Union[str, Any] =self.model_tester.prepare_config_and_inputs_with_labels()
lowerCamelCase__: List[Any] =True
if model_class.__name__ in [
*get_values(a_),
*get_values(a_),
]:
continue
lowerCamelCase__: Tuple =model_class(a_)
model.to(a_)
model.train()
lowerCamelCase__: Optional[Any] =self._prepare_for_class(a_ , a_ , return_labels=a_)
lowerCamelCase__: Any =model(**a_).loss
loss.backward()
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[int]:
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCamelCase__: str =self.model_tester.prepare_config_and_inputs_with_labels()
lowerCamelCase__: Any =False
lowerCamelCase__: List[Any] =True
if (
model_class.__name__
in [*get_values(a_), *get_values(a_)]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCamelCase__: Dict =model_class(a_)
model.to(a_)
model.gradient_checkpointing_enable()
model.train()
lowerCamelCase__: str =self._prepare_for_class(a_ , a_ , return_labels=a_)
lowerCamelCase__: Optional[int] =model(**a_).loss
loss.backward()
def SCREAMING_SNAKE_CASE_ (self : str) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: List[str] =model_class(a_)
lowerCamelCase__: Union[str, Any] =inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__: int =[*signature.parameters.keys()]
lowerCamelCase__: Union[str, Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a_)
def SCREAMING_SNAKE_CASE_ (self : int) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->int:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple):
lowerCamelCase__: Optional[Any] =model_class(a_)
model.to(a_)
model.eval()
with torch.no_grad():
lowerCamelCase__: Any =model(**self._prepare_for_class(a_ , a_))
lowerCamelCase__: Optional[int] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase__: Optional[int] =self.model_tester.num_stages
self.assertEqual(len(a_) , expected_num_stages + 1)
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase__: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__: Optional[Any] =True
check_hidden_states_output(a_ , a_ , a_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__: List[str] =True
check_hidden_states_output(a_ , a_ , a_)
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Any:
'''simple docstring'''
lowerCamelCase__: List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_)
@slow
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__: str =ConvNextVaModel.from_pretrained(a_)
self.assertIsNotNone(a_)
def lowerCAmelCase_ ( ) -> int:
"""simple docstring"""
lowerCamelCase__: List[Any] =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->List[str]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(a_)
lowerCamelCase__: Union[str, Any] =self.default_image_processor
lowerCamelCase__: List[Any] =prepare_img()
lowerCamelCase__: Optional[int] =preprocessor(images=a_ , return_tensors="pt").to(a_)
# forward pass
with torch.no_grad():
lowerCamelCase__: Optional[int] =model(**a_)
# verify the logits
lowerCamelCase__: Optional[int] =torch.Size((1, 1_000))
self.assertEqual(outputs.logits.shape , a_)
lowerCamelCase__: Optional[int] =torch.tensor([0.9996, 0.1966, -0.4386]).to(a_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1E-4))
| 706 |
import os
import sys
import unittest
__A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__A = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
__A = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: List[str] =get_test_to_tester_mapping(UpperCAmelCase_)
lowerCamelCase__: List[str] =get_test_to_tester_mapping(UpperCAmelCase_)
lowerCamelCase__: List[Any] ={"BertModelTest": "BertModelTester"}
lowerCamelCase__: Optional[int] ={
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: str =get_model_to_test_mapping(UpperCAmelCase_)
lowerCamelCase__: int =get_model_to_test_mapping(UpperCAmelCase_)
lowerCamelCase__: Tuple ={
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
lowerCamelCase__: Any ={
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =get_model_to_tester_mapping(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =get_model_to_tester_mapping(UpperCAmelCase_)
lowerCamelCase__: Any ={
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
lowerCamelCase__: Optional[Any] ={
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
| 437 | 0 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a__ : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.14.0', 'To fix: pip install -r examples/pytorch/audio-classification/requirements.txt')
def __snake_case ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : int = 16_000 ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = int(round(sample_rate * max_length ) )
if len(SCREAMING_SNAKE_CASE_ ) <= sample_length:
return wav
UpperCAmelCase = randint(0 , len(SCREAMING_SNAKE_CASE_ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
_lowerCamelCase =field(default=UpperCAmelCase_ , metadata={"help": "Name of a dataset from the datasets package"} )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "A file containing the training audio paths and labels."} )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "A file containing the validation audio paths and labels."} )
_lowerCamelCase =field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
_lowerCamelCase =field(
default="validation" , metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
_lowerCamelCase =field(
default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , )
_lowerCamelCase =field(
default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
_lowerCamelCase =field(
default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , )
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
_lowerCamelCase =field(
default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
_lowerCamelCase =field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Name or path of preprocessor config."} )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
_lowerCamelCase =field(
default=UpperCAmelCase_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def __snake_case ( self : Union[str, Any] ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''' , a__ , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def __snake_case ( ) -> int:
"""simple docstring"""
UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
UpperCAmelCase = DatasetDict()
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f"{', '.join(raw_datasets['train'].column_names )}." )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--label_column_name` to the correct text column - one of '''
f"{', '.join(raw_datasets['train'].column_names )}." )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCAmelCase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCAmelCase = feature_extractor.model_input_names[0]
def train_transforms(SCREAMING_SNAKE_CASE_ : int ):
UpperCAmelCase = []
for audio in batch[data_args.audio_column_name]:
UpperCAmelCase = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE_ )}
UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(SCREAMING_SNAKE_CASE_ : List[Any] ):
UpperCAmelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
UpperCAmelCase = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE_ )}
UpperCAmelCase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCAmelCase = raw_datasets['''train'''].features[data_args.label_column_name].names
UpperCAmelCase, UpperCAmelCase = {}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = label
# Load the accuracy metric from the datasets package
UpperCAmelCase = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(SCREAMING_SNAKE_CASE_ : Dict ):
UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE_ , references=eval_pred.label_ids )
UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE_ ) , labelaid=SCREAMING_SNAKE_CASE_ , idalabel=SCREAMING_SNAKE_CASE_ , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(SCREAMING_SNAKE_CASE_ , output_all_columns=SCREAMING_SNAKE_CASE_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(SCREAMING_SNAKE_CASE_ , output_all_columns=SCREAMING_SNAKE_CASE_ )
# Initialize our trainer
UpperCAmelCase = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
UpperCAmelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase = last_checkpoint
UpperCAmelCase = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase = trainer.evaluate()
trainer.log_metrics('''eval''' , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics('''eval''' , SCREAMING_SNAKE_CASE_ )
# Write model card and (optionally) push to hub
UpperCAmelCase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 51 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : str ):
__a : List[Any] = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_0_2_4,
'hidden_size': 7_6_8,
'max_length': 5_1_2,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_0_2_4,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
__a : Optional[int] = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__a : List[str] = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowerCamelCase_ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__a : int = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__a : Optional[Any] = os.path.join(get_home_dir() , 'models' )
__a : Optional[Any] = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ )
__a : Any = nlp.model.BERTModel(
lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , )
original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ )
__a : Dict = original_bort._collect_params_with_prefix()
# Build our config 🤗
__a : Optional[Any] = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(lowerCamelCase_ ),
}
__a : str = BertConfig.from_dict(lowerCamelCase_ )
__a : Optional[int] = BertForMaskedLM(lowerCamelCase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowerCamelCase_ : Optional[Any] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] ):
__a : Optional[int] = hf_param.shape
__a : int = to_torch(params[gluon_param] )
__a : int = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__a : str = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__a : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__a : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__a : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__a : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__a : BertSelfAttention = layer.attention.self
__a : Optional[int] = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
__a : str = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
__a : List[str] = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
__a : str = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
__a : Dict = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
__a : str = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
__a : BertSelfOutput = layer.attention.output
__a : Tuple = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
__a : Dict = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
__a : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
__a : Optional[Any] = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
__a : BertIntermediate = layer.intermediate
__a : List[str] = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
__a : Optional[Any] = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
__a : BertOutput = layer.output
__a : str = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
__a : List[Any] = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
__a : str = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
__a : List[str] = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__a : Union[str, Any] = RobertaTokenizer.from_pretrained('roberta-base' )
__a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ )['input_ids']
# Get gluon output
__a : Optional[int] = mx.nd.array([input_ids] )
__a : Tuple = original_bort(inputs=lowerCamelCase_ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase_ )
__a : Optional[Any] = BertModel.from_pretrained(lowerCamelCase_ )
hf_bort_model.eval()
__a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ , return_tensors='pt' )
__a : int = hf_bort_model(**lowerCamelCase_ )[0]
__a : Dict = output_gluon[0].asnumpy()
__a : str = output_hf[0].detach().numpy()
__a : List[Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__a : str = np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if success:
print('✔️ Both model do output the same tensors' )
else:
print('❌ Both model do **NOT** output the same tensors' )
print('Absolute difference is:' , lowerCamelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 47 | 0 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _SCREAMING_SNAKE_CASE ( unittest.TestCase , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[Any] = load_tool("text-classification" )
self.tool.setup()
_UpperCamelCase : List[Any] = load_tool("text-classification" , remote=lowerCAmelCase__ )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(lowerCAmelCase__ , "positive" )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.remote_tool("That's quite cool" , ["positive", "negative"] )
self.assertEqual(lowerCAmelCase__ , "positive" )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(lowerCAmelCase__ , "positive" )
def lowercase_ (self ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
self.assertEqual(lowerCAmelCase__ , "positive" )
| 239 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 239 | 1 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
__A : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : Tuple = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 8 , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = do_rescale
UpperCamelCase : List[str] = rescale_factor
UpperCamelCase : List[Any] = do_pad
UpperCamelCase : int = pad_size
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ ):
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase , UpperCamelCase : List[Any] = get_image_size(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = (old_height // size + 1) * size - old_height
UpperCamelCase : Union[str, Any] = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase : Tuple = do_pad if do_pad is not None else self.do_pad
UpperCamelCase : int = pad_size if pad_size is not None else self.pad_size
UpperCamelCase : Dict = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
UpperCamelCase : Dict = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
UpperCamelCase : List[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_pad:
UpperCamelCase : List[Any] = [self.pad(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase : Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase : Union[str, Any] = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
| 499 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__A : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : Tuple = ['pixel_values']
def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PIL.Image.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = size if size is not None else {"""height""": 256, """width""": 256}
UpperCamelCase : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Optional[int] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCamelCase : Dict = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name="""crop_size""" )
UpperCamelCase : str = do_resize
UpperCamelCase : List[Any] = size
UpperCamelCase : Optional[int] = resample
UpperCamelCase : str = do_center_crop
UpperCamelCase : Union[str, Any] = crop_size
UpperCamelCase : List[Any] = do_rescale
UpperCamelCase : List[str] = rescale_factor
UpperCamelCase : List[str] = do_normalize
UpperCamelCase : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PIL.Image.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Dict = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return resize(
SCREAMING_SNAKE_CASE_ , size=(size["""height"""], size["""width"""]) , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size["""height"""], size["""width"""]) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : List[str] = do_resize if do_resize is not None else self.do_resize
UpperCamelCase : Optional[int] = resample if resample is not None else self.resample
UpperCamelCase : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase : str = image_mean if image_mean is not None else self.image_mean
UpperCamelCase : Any = image_std if image_std is not None else self.image_std
UpperCamelCase : Dict = size if size is not None else self.size
UpperCamelCase : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Any = crop_size if crop_size is not None else self.crop_size
UpperCamelCase : Dict = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name="""crop_size""" )
UpperCamelCase : int = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
UpperCamelCase : Dict = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
UpperCamelCase : int = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
UpperCamelCase : str = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
UpperCamelCase : Union[str, Any] = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
UpperCamelCase : Union[str, Any] = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase : Tuple = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
UpperCamelCase : Union[str, Any] = {"""pixel_values""": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
| 499 | 1 |
from manim import *
class _lowerCamelCase (lowerCamelCase ):
def __lowerCamelCase ( self ):
__snake_case = Rectangle(height=0.5 , width=0.5 )
__snake_case = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__snake_case = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__snake_case = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__snake_case = Text('CPU' , font_size=24 )
__snake_case = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
__snake_case = [mem.copy() for i in range(1 )]
__snake_case = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__snake_case = Text('GPU' , font_size=24 )
__snake_case = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
gpu.align_to(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(SCREAMING_SNAKE_CASE_ )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__snake_case = Text('Model' , font_size=24 )
__snake_case = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) , )
__snake_case = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
__snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__snake_case = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=2.5 ) , Write(SCREAMING_SNAKE_CASE_ ) , Write(SCREAMING_SNAKE_CASE_ ) )
self.add(SCREAMING_SNAKE_CASE_ )
__snake_case = []
__snake_case = []
__snake_case = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
__snake_case = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7 )
cpu_target.move_to(SCREAMING_SNAKE_CASE_ )
cpu_target.generate_target()
__snake_case = 0.4_6 / 4
__snake_case = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=SCREAMING_SNAKE_CASE_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
cpu_targs.append(SCREAMING_SNAKE_CASE_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(SCREAMING_SNAKE_CASE_ ) )
second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE_ )
self.play(*SCREAMING_SNAKE_CASE_ )
self.wait()
| 345 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _lowerCamelCase (unittest.TestCase ):
def __lowerCamelCase ( self ):
__snake_case = tempfile.mkdtemp()
__snake_case = BlipImageProcessor()
__snake_case = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
__snake_case = BlipaProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE_ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).tokenizer
def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE_ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ).image_processor
def __lowerCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self ):
__snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self ):
__snake_case = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__snake_case = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
__snake_case = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
__snake_case = self.prepare_image_inputs()
__snake_case = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' )
__snake_case = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCamelCase ( self ):
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
__snake_case = 'lower newer'
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer(SCREAMING_SNAKE_CASE_ , return_token_type_ids=SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCamelCase ( self ):
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
__snake_case = 'lower newer'
__snake_case = self.prepare_image_inputs()
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
processor()
def __lowerCamelCase ( self ):
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
__snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
__snake_case = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( self ):
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = BlipaProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
__snake_case = 'lower newer'
__snake_case = self.prepare_image_inputs()
__snake_case = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 345 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class UpperCamelCase ( snake_case__ ):
__UpperCamelCase = """funnel"""
__UpperCamelCase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : List[str] ,_lowerCAmelCase : Dict=30_522 ,_lowerCAmelCase : List[str]=[4, 4, 4] ,_lowerCAmelCase : List[Any]=None ,_lowerCAmelCase : str=2 ,_lowerCAmelCase : List[Any]=768 ,_lowerCAmelCase : List[Any]=12 ,_lowerCAmelCase : Optional[Any]=64 ,_lowerCAmelCase : Union[str, Any]=3_072 ,_lowerCAmelCase : int="gelu_new" ,_lowerCAmelCase : List[Any]=0.1 ,_lowerCAmelCase : Dict=0.1 ,_lowerCAmelCase : List[str]=0.0 ,_lowerCAmelCase : Optional[int]=0.1 ,_lowerCAmelCase : Tuple=None ,_lowerCAmelCase : List[str]=1E-9 ,_lowerCAmelCase : int="mean" ,_lowerCAmelCase : List[str]="relative_shift" ,_lowerCAmelCase : Union[str, Any]=True ,_lowerCAmelCase : str=True ,_lowerCAmelCase : str=True ,**_lowerCAmelCase : str ,):
"""simple docstring"""
__snake_case = vocab_size
__snake_case = block_sizes
__snake_case = [1] * len(_lowerCAmelCase ) if block_repeats is None else block_repeats
assert len(_lowerCAmelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__snake_case = num_decoder_layers
__snake_case = d_model
__snake_case = n_head
__snake_case = d_head
__snake_case = d_inner
__snake_case = hidden_act
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = initializer_range
__snake_case = initializer_std
__snake_case = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
__snake_case = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
__snake_case = attention_type
__snake_case = separate_cls
__snake_case = truncate_seq
__snake_case = pool_q_only
super().__init__(**_lowerCAmelCase )
@property
def UpperCamelCase_ ( self : int ):
"""simple docstring"""
return sum(self.block_sizes )
@num_hidden_layers.setter
def UpperCamelCase_ ( self : Union[str, Any] ,_lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
@property
def UpperCamelCase_ ( self : int ):
"""simple docstring"""
return len(self.block_sizes )
@num_blocks.setter
def UpperCamelCase_ ( self : Any ,_lowerCAmelCase : str ):
"""simple docstring"""
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
| 524 | lowerCamelCase__ = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 524 | 1 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
A__ : List[str] = open # noqa: we just need to have a builtin inside this module to test it properly
| 124 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = '''openai/whisper-base'''
A__ = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
A__ = '''transcriber'''
A__ = WhisperProcessor
A__ = WhisperForConditionalGeneration
A__ = ['''audio''']
A__ = ['''text''']
def A_ ( self : Any , __a : int ) -> Dict:
'''simple docstring'''
return self.pre_processor(__a , return_tensors='pt' ).input_features
def A_ ( self : Optional[int] , __a : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.model.generate(inputs=__a )
def A_ ( self : str , __a : str ) -> List[str]:
'''simple docstring'''
return self.pre_processor.batch_decode(__a , skip_special_tokens=__a )[0]
| 124 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( A , unittest.TestCase ):
'''simple docstring'''
lowercase_ : Union[str, Any] = KandinskyInpaintPipeline
lowercase_ : Tuple = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
lowercase_ : Dict = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
lowercase_ : Optional[Any] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
lowercase_ : Optional[Any] = False
@property
def UpperCamelCase ( self : Any ):
'''simple docstring'''
return 32
@property
def UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
return 1_00
@property
def UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def UpperCamelCase ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Dict = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
UpperCAmelCase__ : Any = MultilingualCLIP(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : str = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCAmelCase__ : List[str] = UNetaDConditionModel(**snake_case__ )
return model
@property
def UpperCamelCase ( self : Any ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : str = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = self.dummy_text_encoder
UpperCAmelCase__ : Any = self.dummy_tokenizer
UpperCAmelCase__ : Tuple = self.dummy_unet
UpperCAmelCase__ : str = self.dummy_movq
UpperCAmelCase__ : str = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type="epsilon" , thresholding=snake_case__ , )
UpperCAmelCase__ : List[Any] = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Any , snake_case__ : Tuple=0 ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(snake_case__ )
# create init_image
UpperCAmelCase__ : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
UpperCAmelCase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ).resize((2_56, 2_56) )
# create mask
UpperCAmelCase__ : Any = np.ones((64, 64) , dtype=np.floataa )
UpperCAmelCase__ : Any = 0
if str(snake_case__ ).startswith("mps" ):
UpperCAmelCase__ : Tuple = torch.manual_seed(snake_case__ )
else:
UpperCAmelCase__ : str = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def UpperCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : str = "cpu"
UpperCAmelCase__ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase__ : int = self.pipeline_class(**snake_case__ )
UpperCAmelCase__ : List[Any] = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase__ : List[str] = pipe(**self.get_dummy_inputs(snake_case__ ) )
UpperCAmelCase__ : int = output.images
UpperCAmelCase__ : Tuple = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Any = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def UpperCamelCase ( self : Dict ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCAmelCase__ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase__ : Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.floataa )
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Tuple = "a hat"
UpperCAmelCase__ : Union[str, Any] = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
UpperCAmelCase__ : int = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCAmelCase__ : Optional[int] = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase__ : Optional[int] = pipeline(
snake_case__ , image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="np" , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 199 |
'''simple docstring'''
from math import factorial
SCREAMING_SNAKE_CASE = {str(digit): factorial(digit) for digit in range(1_0)}
def snake_case_ ( lowercase__ ):
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError("Parameter number must be int" )
if number < 0:
raise ValueError("Parameter number must be greater than or equal to 0" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(lowercase__ ) )
def snake_case_ ( lowercase__ = 6_0 , lowercase__ = 1_0_0_0_0_0_0 ):
if not isinstance(lowercase__ , lowercase__ ) or not isinstance(lowercase__ , lowercase__ ):
raise TypeError("Parameters chain_length and number_limit must be int" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"Parameters chain_length and number_limit must be greater than 0" )
# the counter for the chains with the exact desired length
UpperCAmelCase__ : Union[str, Any] = 0
# the cached sizes of the previous chains
UpperCAmelCase__ : dict[int, int] = {}
for start_chain_element in range(1 , lowercase__ ):
# The temporary set will contain the elements of the chain
UpperCAmelCase__ : Any = set()
UpperCAmelCase__ : int = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
UpperCAmelCase__ : List[str] = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(lowercase__ )
chain_set_length += 1
UpperCAmelCase__ : List[str] = digit_factorial_sum(lowercase__ )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
UpperCAmelCase__ : List[str] = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
| 199 | 1 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowercase : Tuple = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
lowercase : List[str] = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
lowercase : Any = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : int = 0.0
for i, j in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
n_correct += 1.0 if math_equivalence.is_equiv(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else 0.0
A : Tuple = n_correct / len(SCREAMING_SNAKE_CASE )
return {
"accuracy": accuracy,
}
| 343 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase : Dict = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class A ( unittest.TestCase ):
__magic_name__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__magic_name__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__magic_name__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__magic_name__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : Any = ZeroShotClassificationPipeline(
model=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : Optional[int] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
# No kwarg
A : Dict = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
A : str = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
A : str = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
A : Optional[int] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
A : Any = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(SCREAMING_SNAKE_CASE , {'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE )]} )
# https://github.com/huggingface/transformers/issues/13846
A : List[str] = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]}
for i in range(1 )
] , )
A : Dict = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
SCREAMING_SNAKE_CASE , [
{'''sequence''': ANY(SCREAMING_SNAKE_CASE ), '''labels''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )], '''scores''': [ANY(SCREAMING_SNAKE_CASE ), ANY(SCREAMING_SNAKE_CASE )]}
for i in range(2 )
] , )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier(SCREAMING_SNAKE_CASE , candidate_labels='''politics''' )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=SCREAMING_SNAKE_CASE )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=SCREAMING_SNAKE_CASE , )
self.run_entailment_id(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : List[Any] = zero_shot_classifier.model.config
A : int = config.labelaid
A : Union[str, Any] = zero_shot_classifier.entailment_id
A : str = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
A : Optional[Any] = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
A : List[str] = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
A : List[str] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
A : Any = original_labelaid
self.assertEqual(SCREAMING_SNAKE_CASE , zero_shot_classifier.entailment_id )
@require_torch
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Optional[int] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : Tuple = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
A : Optional[int] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Optional[Any] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
A : Union[str, Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : str = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
A : Tuple = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
A : List[str] = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[str] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
A : List[Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
A : Tuple = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=SCREAMING_SNAKE_CASE , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 343 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Dict = "mobilenet_v2"
def __init__( self : Union[str, Any] , __lowerCAmelCase : Any=3 , __lowerCAmelCase : str=2_24 , __lowerCAmelCase : Optional[int]=1.0 , __lowerCAmelCase : Dict=8 , __lowerCAmelCase : Any=8 , __lowerCAmelCase : int=6 , __lowerCAmelCase : str=32 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any="relu6" , __lowerCAmelCase : int=True , __lowerCAmelCase : int=0.8 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Dict=0.001 , __lowerCAmelCase : Dict=2_55 , **__lowerCAmelCase : Optional[Any] , ) -> Dict:
super().__init__(**__lowerCAmelCase )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_A = num_channels
_A = image_size
_A = depth_multiplier
_A = depth_divisible_by
_A = min_depth
_A = expand_ratio
_A = output_stride
_A = first_layer_is_expansion
_A = finegrained_output
_A = hidden_act
_A = tf_padding
_A = classifier_dropout_prob
_A = initializer_range
_A = layer_norm_eps
_A = semantic_loss_ignore_index
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : Union[str, Any] = version.parse("1.11")
@property
def snake_case_ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def snake_case_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def snake_case_ ( self : Optional[int] ) -> float:
return 1E-4
| 2 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
_SCREAMING_SNAKE_CASE = 5_0_0_0_3
_SCREAMING_SNAKE_CASE = 5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Union[str, Any] = PLBartTokenizer
a : Optional[Any] = None
a : Optional[Any] = False
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = PLBartTokenizer(_lowerCamelCase ,language_codes='''base''' ,keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = PLBartTokenizer(_lowerCamelCase ,language_codes='''base''' ,keep_accents=_lowerCamelCase )
__lowercase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCamelCase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
__lowercase = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
__lowercase = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
__lowercase = tokenizer.vocab_size
__lowercase = [tokenizer.convert_ids_to_tokens(_lowerCamelCase ) for x in range(end - 4 ,_lowerCamelCase )]
self.assertListEqual(_lowerCamelCase ,['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
__lowercase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__lowercase = tokenizer(_lowerCamelCase ).input_ids
self.assertEqual(
tokenizer.decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase ,clean_up_tokenization_spaces=_lowerCamelCase ) ,_lowerCamelCase ,)
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = PLBartTokenizer(_lowerCamelCase ,language_codes='''multi''' ,keep_accents=_lowerCamelCase )
__lowercase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_lowerCamelCase ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
__lowercase = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
__lowercase = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
__lowercase = tokenizer.vocab_size
__lowercase = [tokenizer.convert_ids_to_tokens(_lowerCamelCase ) for x in range(end - 7 ,_lowerCamelCase )]
self.assertListEqual(
_lowerCamelCase ,['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
__lowercase = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__lowercase = tokenizer(_lowerCamelCase ).input_ids
self.assertEqual(
tokenizer.decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase ,clean_up_tokenization_spaces=_lowerCamelCase ) ,_lowerCamelCase ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
a : Tuple = "uclanlp/plbart-python-en_XX"
a : List[Any] = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
a : List[Any] = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
a : Union[str, Any] = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def _UpperCAmelCase (cls ) -> Dict:
'''simple docstring'''
__lowercase = PLBartTokenizer.from_pretrained(
cls.checkpoint_name ,language_codes='''base''' ,src_lang='''python''' ,tgt_lang='''en_XX''' )
__lowercase = 1
return cls
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] ,50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] ,50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] ,50003 )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
self.assertIn(_lowerCamelCase ,self.tokenizer.all_special_ids )
__lowercase = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
__lowercase = self.tokenizer.decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase )
__lowercase = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] ,_lowerCamelCase )
__lowercase = 10
__lowercase = self.tokenizer(_lowerCamelCase ,max_length=_lowerCamelCase ,truncation=_lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) ,[50004, 50001] )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
__lowercase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = PLBartTokenizer.from_pretrained(_lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,_lowerCamelCase )
@require_torch
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=_lowerCamelCase ,return_tensors='''pt''' )
__lowercase = shift_tokens_right(batch['''labels'''] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] ,_lowerCamelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] ,2 )
self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] )
@require_torch
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=len(self.expected_src_tokens ) ,return_tensors='''pt''' ,)
__lowercase = shift_tokens_right(batch['''labels'''] ,self.tokenizer.pad_token_id )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual((2, 26) ,batch.input_ids.shape )
self.assertEqual((2, 26) ,batch.attention_mask.shape )
__lowercase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,_lowerCamelCase )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] )
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.tokenizer(self.src_text ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=3 ,return_tensors='''pt''' )
__lowercase = self.tokenizer(
text_target=self.tgt_text ,padding=_lowerCamelCase ,truncation=_lowerCamelCase ,max_length=10 ,return_tensors='''pt''' )
__lowercase = targets['''input_ids''']
__lowercase = shift_tokens_right(_lowerCamelCase ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.tokenizer._build_translation_inputs(
'''A test''' ,return_tensors='''pt''' ,src_lang='''en_XX''' ,tgt_lang='''java''' )
self.assertEqual(
nested_simplify(_lowerCamelCase ) ,{
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50001,
} ,)
| 502 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase : int = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class UpperCAmelCase__ ( __a , __a ):
a : List[str] = '''swin'''
a : str = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , UpperCamelCase=224 , UpperCamelCase=4 , UpperCamelCase=3 , UpperCamelCase=96 , UpperCamelCase=[2, 2, 6, 2] , UpperCamelCase=[3, 6, 12, 24] , UpperCamelCase=7 , UpperCamelCase=4.0 , UpperCamelCase=True , UpperCamelCase=0.0 , UpperCamelCase=0.0 , UpperCamelCase=0.1 , UpperCamelCase="gelu" , UpperCamelCase=False , UpperCamelCase=0.02 , UpperCamelCase=1E-5 , UpperCamelCase=32 , UpperCamelCase=None , UpperCamelCase=None , **UpperCamelCase , ) -> Tuple:
super().__init__(**snake_case__ )
__lowerCAmelCase = image_size
__lowerCAmelCase = patch_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embed_dim
__lowerCAmelCase = depths
__lowerCAmelCase = len(snake_case__ )
__lowerCAmelCase = num_heads
__lowerCAmelCase = window_size
__lowerCAmelCase = mlp_ratio
__lowerCAmelCase = qkv_bias
__lowerCAmelCase = hidden_dropout_prob
__lowerCAmelCase = attention_probs_dropout_prob
__lowerCAmelCase = drop_path_rate
__lowerCAmelCase = hidden_act
__lowerCAmelCase = use_absolute_embeddings
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = initializer_range
__lowerCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__lowerCAmelCase = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
__lowerCAmelCase = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(snake_case__ ) + 1 )]
__lowerCAmelCase , __lowerCAmelCase = get_aligned_output_features_output_indices(
out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
class UpperCAmelCase__ ( __a ):
a : Dict = version.parse("""1.11""" )
@property
def UpperCAmelCase_ ( self ) -> List[str]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return 1E-4 | 705 |
'''simple docstring'''
from __future__ import annotations
import math
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
if len(lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(lowerCamelCase ) != 2 or len(b[0] ) != 2:
raise Exception("Matrices are not 2x2" )
__lowerCAmelCase = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase ) )
]
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(lowerCamelCase ) )
]
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
if len(lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("Odd matrices are not supported!" )
__lowerCAmelCase = len(lowerCamelCase )
__lowerCAmelCase = matrix_length // 2
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase )]
__lowerCAmelCase = [
[a[i][j] for j in range(lowerCamelCase , lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )
]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase )]
__lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase )] for i in range(lowerCamelCase , lowerCamelCase )]
return top_left, top_right, bot_left, bot_right
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
return len(lowerCamelCase ), len(matrix[0] )
def __lowerCAmelCase ( lowerCamelCase : list ):
'''simple docstring'''
print("\n".join(str(lowerCamelCase ) for line in matrix ) )
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
if matrix_dimensions(lowerCamelCase ) == (2, 2):
return default_matrix_multiplication(lowerCamelCase , lowerCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase )
__lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
__lowerCAmelCase = actual_strassen(lowerCamelCase , matrix_subtraction(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase , lowerCamelCase ) , matrix_addition(lowerCamelCase , lowerCamelCase ) )
__lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase )
__lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase )
__lowerCAmelCase = matrix_addition(lowerCamelCase , lowerCamelCase )
__lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase , lowerCamelCase ) , lowerCamelCase ) , lowerCamelCase )
# construct the new matrix from our 4 quadrants
__lowerCAmelCase = []
for i in range(len(lowerCamelCase ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(lowerCamelCase ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def __lowerCAmelCase ( lowerCamelCase : list , lowerCamelCase : list ):
'''simple docstring'''
if matrix_dimensions(lowerCamelCase )[1] != matrix_dimensions(lowerCamelCase )[0]:
__lowerCAmelCase = (
"Unable to multiply these matrices, please check the dimensions.\n"
f'''Matrix A: {matrixa}\n'''
f'''Matrix B: {matrixa}'''
)
raise Exception(lowerCamelCase )
__lowerCAmelCase = matrix_dimensions(lowerCamelCase )
__lowerCAmelCase = matrix_dimensions(lowerCamelCase )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
__lowerCAmelCase = max(*lowerCamelCase , *lowerCamelCase )
__lowerCAmelCase = int(math.pow(2 , math.ceil(math.loga(lowerCamelCase ) ) ) )
__lowerCAmelCase = matrixa
__lowerCAmelCase = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
__lowerCAmelCase = actual_strassen(lowerCamelCase , lowerCamelCase )
# Removing the additional zeros
for i in range(0 , lowerCamelCase ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , lowerCamelCase ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
lowerCAmelCase : Tuple = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
lowerCAmelCase : Any = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
print(strassen(matrixa, matrixa)) | 39 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowerCAmelCase : Union[str, Any] = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Union[str, Any] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 438 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A ( nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
super().__init__()
A : List[Any] = module
A : Any = nn.Sequential(
nn.Linear(module.in_features , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE ) , nn.Linear(SCREAMING_SNAKE_CASE , module.out_features , bias=SCREAMING_SNAKE_CASE ) , )
A : List[str] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=SCREAMING_SNAKE_CASE )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return self.module(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) + self.adapter(SCREAMING_SNAKE_CASE )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
__magic_name__ = '''bigscience/bloom-1b7'''
# Constant values
__magic_name__ = 2.1_09_65_95_52_69_25_74
__magic_name__ = '''Hello my name is'''
__magic_name__ = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
__magic_name__ = 10
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Any = AutoTokenizer.from_pretrained(self.model_name )
class A ( __snake_case ):
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
A : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
A : List[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''auto''' )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Any = self.model_abit.config
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''quantization_config''' ) )
A : List[str] = config.to_dict()
A : List[Any] = config.to_diff_dict()
A : Union[str, Any] = config.to_json_string()
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
A : Optional[Any] = self.model_fpaa.get_memory_footprint()
A : Any = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
A : Union[str, Any] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(SCREAMING_SNAKE_CASE , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : List[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
A : List[str] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : str = BitsAndBytesConfig()
A : Optional[Any] = True
A : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE , device_map='''auto''' )
A : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
A : Tuple = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : Union[str, Any] = BitsAndBytesConfig()
with self.assertRaises(SCREAMING_SNAKE_CASE ):
A : int = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=SCREAMING_SNAKE_CASE , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(SCREAMING_SNAKE_CASE ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
A : int = self.tokenizer(self.input_text , return_tensors='''pt''' )
A : Dict = self.model_fpaa.to(torch.floataa )
A : Union[str, Any] = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
A : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
A : List[str] = self.model_fpaa.half()
# Check this does not throw an error
A : Tuple = self.model_fpaa.float()
def __lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
A : Tuple = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
@classmethod
def __lowerCAmelCase ( cls ) -> Union[str, Any]:
"""simple docstring"""
A : int = '''t5-small'''
A : Any = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
A : List[Any] = AutoTokenizer.from_pretrained(cls.model_name )
A : List[str] = '''Translate in German: Hello, my dog is cute'''
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
from transformers import TaForConditionalGeneration
A : int = TaForConditionalGeneration._keep_in_fpaa_modules
A : Optional[Any] = None
# test with `t5-small`
A : Optional[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''auto''' )
A : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A : List[Any] = model.generate(**SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A : Any = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''auto''' )
A : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A : List[Any] = model.generate(**SCREAMING_SNAKE_CASE )
A : str = modules
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
A : List[str] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
A : List[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A : Any = model.generate(**SCREAMING_SNAKE_CASE )
# test with `flan-t5-small`
A : str = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''auto''' )
A : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
A : Tuple = model.generate(**SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
super().setUp()
# model_name
A : Tuple = '''bigscience/bloom-560m'''
A : int = '''t5-small'''
# Different types of model
A : Optional[int] = AutoModel.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''auto''' )
# Sequence classification model
A : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''auto''' )
# CausalLM model
A : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''auto''' )
# Seq2seq model
A : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''auto''' )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A ( __snake_case ):
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
super().setUp()
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[Any] = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
A : str = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A ( __snake_case ):
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
super().setUp()
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=SCREAMING_SNAKE_CASE , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
A : Optional[int] = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
A : Optional[Any] = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=SCREAMING_SNAKE_CASE ) , self.EXPECTED_OUTPUTS )
class A ( __snake_case ):
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : int = '''facebook/opt-350m'''
super().setUp()
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
A : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=SCREAMING_SNAKE_CASE )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
A : Optional[int] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
A : Tuple = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(SCREAMING_SNAKE_CASE ) ):
A : Tuple = LoRALayer(module.q_proj , rank=16 )
A : Tuple = LoRALayer(module.k_proj , rank=16 )
A : str = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
A : Dict = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
A : str = model.forward(**SCREAMING_SNAKE_CASE )
out.logits.norm().backward()
for module in model.modules():
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(SCREAMING_SNAKE_CASE , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A ( __snake_case ):
__magic_name__ = '''gpt2-xl'''
__magic_name__ = 3.31_91_85_48_54_15_21_87
| 634 | 0 |
from __future__ import annotations
import math
from collections.abc import Callable
def UpperCamelCase__ ( A__ , A__ , A__ , A__ = 100 , ) -> float:
snake_case__ : Optional[int] = x_start
snake_case__ : Any = fnc(A__ )
snake_case__ : List[str] = 0.0
for _ in range(A__ ):
# Approximates curve as a sequence of linear lines and sums their length
snake_case__ : Optional[int] = (x_end - x_start) / steps + xa
snake_case__ : Tuple = fnc(A__ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
snake_case__ : str = xa
snake_case__ : Any = fxa
return length
if __name__ == "__main__":
def UpperCamelCase__ ( A__ ) -> Optional[int]:
return math.sin(10 * x )
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
lowerCAmelCase__ : Dict = 10
while i <= 10_00_00:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 699 | import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : List[Any] = '''▁'''
lowerCAmelCase__ : int = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __snake_case ( _lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = BertGenerationTokenizer
__lowerCamelCase = False
__lowerCamelCase = True
def __a ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
snake_case__ : str = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = '<s>'
snake_case__ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(__UpperCamelCase ) , 1002 )
def __a ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[Any] = BertGenerationTokenizer(__UpperCamelCase , keep_accents=__UpperCamelCase )
snake_case__ : int = tokenizer.tokenize('This is a test' )
self.assertListEqual(__UpperCamelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [285, 46, 10, 170, 382] , )
snake_case__ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case__ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def __a ( self ) -> Dict:
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
@slow
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : int = 'Hello World!'
snake_case__ : Union[str, Any] = [18536, 2260, 101]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : str = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
snake_case__ : List[Any] = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
self.assertListEqual(__UpperCamelCase , self.big_tokenizer.encode(__UpperCamelCase ) )
@require_torch
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
snake_case__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case__ : Optional[int] = ' '.join(__UpperCamelCase )
snake_case__ : int = self.big_tokenizer.encode_plus(__UpperCamelCase , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
snake_case__ : Tuple = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=__UpperCamelCase )
snake_case__ : Dict = BertGenerationConfig()
snake_case__ : List[str] = BertGenerationEncoder(__UpperCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__UpperCamelCase )
model(**__UpperCamelCase )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Optional[int] = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name='google/bert_for_seq_generation_L-24_bbc_encoder' , revision='c817d1fd1be2ffa69431227a1fe320544943d4db' , )
| 699 | 1 |
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> int:
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> bool:
_lowercase = 0
_lowercase = number
while duplicate > 0:
_lowercase , _lowercase = divmod(snake_case__ , 10 )
fact_sum += factorial(snake_case__ )
return fact_sum == number
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
snake_case = int(input("""Enter number: """).strip())
print(
F"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
) | 67 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
SCREAMING_SNAKE_CASE : Dict = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
sd_pipe.set_scheduler('sample_euler' )
SCREAMING_SNAKE_CASE : Union[str, Any] = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
SCREAMING_SNAKE_CASE : str = output.images
SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
sd_pipe.set_scheduler('sample_euler' )
SCREAMING_SNAKE_CASE : List[str] = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = sd_pipe([prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
SCREAMING_SNAKE_CASE : Optional[int] = output.images
SCREAMING_SNAKE_CASE : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
SCREAMING_SNAKE_CASE : Optional[int] = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(
[prompt] , generator=_SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE : int = output.images
SCREAMING_SNAKE_CASE : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : int = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 265 | 0 |
"""simple docstring"""
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase: List[Any] = ''''''
for i in table:
res += inp[i - 1]
return res
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
return data[1:] + data[0]
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase: Union[str, Any] = ''''''
for i in range(len(lowerCAmelCase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase: Dict = int('''0b''' + data[0] + data[-1] , 2 )
_lowercase: Union[str, Any] = int('''0b''' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase: int = message[:4]
_lowercase: List[Any] = message[4:]
_lowercase: List[Any] = apply_table(lowerCAmelCase_ , lowerCAmelCase_ )
_lowercase: List[str] = xor(lowerCAmelCase_ , lowerCAmelCase_ )
_lowercase: Optional[Any] = apply_sbox(lowerCAmelCase_ , temp[:4] ) # noqa: E741
_lowercase: List[str] = apply_sbox(lowerCAmelCase_ , temp[4:] )
_lowercase: int = '''0''' * (2 - len(lowerCAmelCase_ )) + l # noqa: E741
_lowercase: Dict = '''0''' * (2 - len(lowerCAmelCase_ )) + r
_lowercase: List[Any] = apply_table(l + r , lowerCAmelCase_ )
_lowercase: Dict = xor(lowerCAmelCase_ , lowerCAmelCase_ )
return temp + right
if __name__ == "__main__":
A__ : Union[str, Any] = input('Enter 10 bit key: ')
A__ : Union[str, Any] = input('Enter 8 bit message: ')
A__ : str = [6, 3, 7, 4, 8, 5, 1_0, 9]
A__ : str = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
A__ : Dict = [2, 4, 3, 1]
A__ : str = [2, 6, 3, 1, 4, 8, 5, 7]
A__ : Union[str, Any] = [4, 1, 3, 5, 7, 2, 8, 6]
A__ : List[str] = [4, 1, 2, 3, 2, 3, 4, 1]
A__ : Dict = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
A__ : Optional[int] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
A__ : Dict = apply_table(key, paa_table)
A__ : Optional[int] = temp[:5]
A__ : Dict = temp[5:]
A__ : int = left_shift(left)
A__ : str = left_shift(right)
A__ : Dict = apply_table(left + right, pa_table)
A__ : str = left_shift(left)
A__ : str = left_shift(right)
A__ : List[str] = left_shift(left)
A__ : Union[str, Any] = left_shift(right)
A__ : Optional[Any] = apply_table(left + right, pa_table)
# encryption
A__ : Tuple = apply_table(message, IP)
A__ : Dict = function(expansion, sa, sa, keya, temp)
A__ : List[Any] = temp[4:] + temp[:4]
A__ : Union[str, Any] = function(expansion, sa, sa, keya, temp)
A__ : Union[str, Any] = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
A__ : List[Any] = apply_table(CT, IP)
A__ : Union[str, Any] = function(expansion, sa, sa, keya, temp)
A__ : Optional[int] = temp[4:] + temp[:4]
A__ : Union[str, Any] = function(expansion, sa, sa, keya, temp)
A__ : Optional[Any] = apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
| 703 |
"""simple docstring"""
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number | (1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number & ~(1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return number ^ (1 << position)
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272 | 0 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def A ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
SCREAMING_SNAKE_CASE__ = Dataset.from_dict(snake_case__ )
return dataset
class lowerCamelCase (A__ ):
def SCREAMING_SNAKE_CASE ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__ = get_dataset()
SCREAMING_SNAKE_CASE__ = make_duplicate_clusters(__UpperCAmelCase , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = get_dataset()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = deduplicate_dataset(__UpperCAmelCase )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
print(__UpperCAmelCase )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , __UpperCAmelCase )
| 196 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
A_ : Union[str, Any] = ["gpt2"]
A_ : Optional[int] = "gpt2"
if is_tf_available():
class lowerCamelCase (tf.Module ):
def __init__( self : Dict , __UpperCAmelCase : str ) -> Any:
super().__init__()
SCREAMING_SNAKE_CASE__ = tokenizer
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = TFGPTaLMHeadModel.from_config(__UpperCAmelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) )
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.tokenizer(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tokenized["""input_ids"""].to_tensor()
SCREAMING_SNAKE_CASE__ = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
SCREAMING_SNAKE_CASE__ = self.model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase )["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class lowerCamelCase (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
super().setUp()
SCREAMING_SNAKE_CASE__ = [GPTaTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
SCREAMING_SNAKE_CASE__ = [TFGPTaTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
SCREAMING_SNAKE_CASE__ = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
SCREAMING_SNAKE_CASE__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
SCREAMING_SNAKE_CASE__ = tokenizer([test_inputs] , return_tensors="""tf""" )
SCREAMING_SNAKE_CASE__ = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
SCREAMING_SNAKE_CASE__ = python_outputs[key].numpy()
SCREAMING_SNAKE_CASE__ = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__UpperCAmelCase , tf.intaa ) == tf_outputs_values ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE__ = tf.function(__UpperCAmelCase )
for test_inputs in self.test_sentences:
SCREAMING_SNAKE_CASE__ = tf.constant(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = compiled_tokenizer(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tf_tokenizer(__UpperCAmelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE__ = ModelToSave(tokenizer=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE__ = model.serving(__UpperCAmelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE__ = Path(__UpperCAmelCase ) / """saved.model"""
tf.saved_model.save(__UpperCAmelCase , __UpperCAmelCase , signatures={"""serving_default""": model.serving} )
SCREAMING_SNAKE_CASE__ = tf.saved_model.load(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = loaded_model.signatures["""serving_default"""](__UpperCAmelCase )["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE__ = tf_tokenizer(__UpperCAmelCase ) # Build model with some sample inputs
SCREAMING_SNAKE_CASE__ = tf_tokenizer.get_config()
SCREAMING_SNAKE_CASE__ = TFGPTaTokenizer.from_config(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model_from_config(__UpperCAmelCase )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
SCREAMING_SNAKE_CASE__ = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE__ = tf_tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = out["""input_ids"""].numpy().shape[1]
assert out_length == max_length
| 196 | 1 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
_lowerCamelCase : Dict = logging.getLogger(__name__)
class snake_case__ ( __snake_case ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=None ) -> int:
super().__init__(
lowerCAmelCase_ , question_encoder_tokenizer=lowerCAmelCase_ , generator_tokenizer=lowerCAmelCase_ , index=lowerCAmelCase_ , init_retrieval=lowerCAmelCase_ , )
UpperCAmelCase_ = None
def UpperCamelCase ( self : List[str] , lowerCAmelCase_ : int ) -> Optional[Any]:
logger.info('''initializing retrieval''' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('''dist initialized''' )
# needs to be set manually
UpperCAmelCase_ = self._infer_socket_ifname()
# avoid clash with the NCCL port
UpperCAmelCase_ = str(distributed_port + 1 )
UpperCAmelCase_ = dist.new_group(ranks=lowerCAmelCase_ , backend='''gloo''' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('''dist not initialized / main''' )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
return dist.get_rank(group=self.process_group ) == 0
def UpperCamelCase ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : int=torch.floataa ) -> List[Any]:
UpperCAmelCase_ = torch.empty(lowerCAmelCase_ , dtype=lowerCAmelCase_ )
dist.scatter(lowerCAmelCase_ , src=0 , scatter_list=lowerCAmelCase_ , group=self.process_group )
return target_tensor
def UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
UpperCAmelCase_ = next((addr for addr in addrs if addr.startswith('''e''' )) , lowerCAmelCase_ )
return ifname
def UpperCamelCase ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : int ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
UpperCAmelCase_, UpperCAmelCase_ = self._main_retrieve(lowerCAmelCase_ , lowerCAmelCase_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowerCAmelCase_ )
# distributed training
UpperCAmelCase_ = dist.get_world_size(group=self.process_group )
# gather logic
UpperCAmelCase_ = None
if self._is_main():
UpperCAmelCase_ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(lowerCAmelCase_ )]
dist.gather(torch.tensor(lowerCAmelCase_ ) , dst=0 , gather_list=lowerCAmelCase_ , group=self.process_group )
# scatter logic
UpperCAmelCase_ = question_hidden_states.shape[0]
UpperCAmelCase_ = []
UpperCAmelCase_ = []
if self._is_main():
assert len(lowerCAmelCase_ ) == world_size
UpperCAmelCase_, UpperCAmelCase_ = self._main_retrieve(torch.cat(lowerCAmelCase_ ).numpy() , lowerCAmelCase_ )
UpperCAmelCase_, UpperCAmelCase_ = torch.tensor(lowerCAmelCase_ ), torch.tensor(lowerCAmelCase_ )
UpperCAmelCase_ = self._chunk_tensor(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ = self._chunk_tensor(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ = self._scattered(lowerCAmelCase_ , [n_queries, n_docs] , target_type=torch.intaa )
UpperCAmelCase_ = self._scattered(lowerCAmelCase_ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowerCAmelCase_ )
| 407 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Dict = logging.get_logger(__name__)
_lowerCamelCase : Any = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class snake_case__ ( __snake_case ):
'''simple docstring'''
__A = '''efficientnet'''
def __init__( self : Any , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 6_00 , lowerCAmelCase_ : float = 2.0 , lowerCAmelCase_ : float = 3.1 , lowerCAmelCase_ : int = 8 , lowerCAmelCase_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCAmelCase_ : List[int] = [32, 16, 24, 40, 80, 1_12, 1_92] , lowerCAmelCase_ : List[int] = [16, 24, 40, 80, 1_12, 1_92, 3_20] , lowerCAmelCase_ : List[int] = [] , lowerCAmelCase_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCAmelCase_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCAmelCase_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCAmelCase_ : float = 0.25 , lowerCAmelCase_ : str = "swish" , lowerCAmelCase_ : int = 25_60 , lowerCAmelCase_ : str = "mean" , lowerCAmelCase_ : float = 0.02 , lowerCAmelCase_ : float = 0.001 , lowerCAmelCase_ : float = 0.99 , lowerCAmelCase_ : float = 0.5 , lowerCAmelCase_ : float = 0.2 , **lowerCAmelCase_ : Optional[Any] , ) -> Any:
super().__init__(**lowerCAmelCase_ )
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = width_coefficient
UpperCAmelCase_ = depth_coefficient
UpperCAmelCase_ = depth_divisor
UpperCAmelCase_ = kernel_sizes
UpperCAmelCase_ = in_channels
UpperCAmelCase_ = out_channels
UpperCAmelCase_ = depthwise_padding
UpperCAmelCase_ = strides
UpperCAmelCase_ = num_block_repeats
UpperCAmelCase_ = expand_ratios
UpperCAmelCase_ = squeeze_expansion_ratio
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dim
UpperCAmelCase_ = pooling_type
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = batch_norm_eps
UpperCAmelCase_ = batch_norm_momentum
UpperCAmelCase_ = dropout_rate
UpperCAmelCase_ = drop_connect_rate
UpperCAmelCase_ = sum(lowerCAmelCase_ ) * 4
class snake_case__ ( __snake_case ):
'''simple docstring'''
__A = version.parse('''1.11''' )
@property
def UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCamelCase ( self : Union[str, Any] ) -> float:
return 1e-5
| 407 | 1 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
__UpperCamelCase : Optional[int] = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
__UpperCamelCase : Any = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
__UpperCamelCase : str = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
__UpperCamelCase : Tuple = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 450 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def UpperCAmelCase_ ( _A , _A , _A , _A , _A = None , _A = None , _A = None , ):
'''simple docstring'''
if config_name_or_path is None:
SCREAMING_SNAKE_CASE__ = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base'''
if generator_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE__ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE__ = question_encoder_name_or_path
SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration
# Save model.
SCREAMING_SNAKE_CASE__ = RagConfig.from_pretrained(_A )
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(_A )
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(_A )
SCREAMING_SNAKE_CASE__ = gen_config
SCREAMING_SNAKE_CASE__ = question_encoder_config
SCREAMING_SNAKE_CASE__ = model_class.from_pretrained_question_encoder_generator(
_A , _A , config=_A )
rag_model.save_pretrained(_A )
# Sanity check.
model_class.from_pretrained(_A )
# Save tokenizers.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(_A )
gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(_A )
question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
_SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args()
_SCREAMING_SNAKE_CASE : str = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 493 | 0 |
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__snake_case = logging.get_logger(__name__)
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple:
'''simple docstring'''
try:
with open(UpperCamelCase_ , 'rb' ) as flax_state_f:
SCREAMING_SNAKE_CASE__ = from_bytes(UpperCamelCase_ , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(UpperCamelCase_ ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(UpperCamelCase_ , UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
SCREAMING_SNAKE_CASE__ = flatten_dict(jax.tree_util.tree_map(lambda UpperCamelCase_ : x.dtype == jnp.bfloataa , UpperCamelCase_ ) ).values()
if any(UpperCamelCase_ ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
SCREAMING_SNAKE_CASE__ = jax.tree_util.tree_map(
lambda UpperCamelCase_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = ''
SCREAMING_SNAKE_CASE__ = flatten_dict(UpperCamelCase_ , sep='.' )
SCREAMING_SNAKE_CASE__ = pt_model.state_dict()
# keep track of unexpected & missing keys
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
SCREAMING_SNAKE_CASE__ = flax_key_tuple.split('.' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
SCREAMING_SNAKE_CASE__ = flax_key_tuple_array[:-1] + ['weight']
SCREAMING_SNAKE_CASE__ = jnp.transpose(UpperCamelCase_ , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
SCREAMING_SNAKE_CASE__ = flax_key_tuple_array[:-1] + ['weight']
SCREAMING_SNAKE_CASE__ = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
SCREAMING_SNAKE_CASE__ = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
SCREAMING_SNAKE_CASE__ = '.'.join(UpperCamelCase_ )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
SCREAMING_SNAKE_CASE__ = np.asarray(UpperCamelCase_ ) if not isinstance(UpperCamelCase_ , np.ndarray ) else flax_tensor
SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase_ )
# remove from missing keys
missing_keys.remove(UpperCamelCase_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(UpperCamelCase_ )
pt_model.load_state_dict(UpperCamelCase_ )
# re-transform missing_keys to list
SCREAMING_SNAKE_CASE__ = list(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(UpperCamelCase_ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
return pt_model
| 714 |
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> list[float]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = coefficient_matrix.shape
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = constant_matrix.shape
if rowsa != colsa:
SCREAMING_SNAKE_CASE__ = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(UpperCamelCase_ )
if colsa != 1:
SCREAMING_SNAKE_CASE__ = F'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(UpperCamelCase_ )
if rowsa != rowsa:
SCREAMING_SNAKE_CASE__ = (
'Coefficient and constant matrices dimensions must be nxn and nx1 but '
F'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(UpperCamelCase_ )
if len(UpperCamelCase_ ) != rowsa:
SCREAMING_SNAKE_CASE__ = (
'Number of initial values must be equal to number of rows in coefficient '
F'matrix but received {len(UpperCamelCase_ )} and {rowsa}'
)
raise ValueError(UpperCamelCase_ )
if iterations <= 0:
raise ValueError('Iterations must be at least 1' )
SCREAMING_SNAKE_CASE__ = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = table.shape
strictly_diagonally_dominant(UpperCamelCase_ )
# Iterates the whole matrix for given number of times
for _ in range(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = []
for row in range(UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = 0
for col in range(UpperCamelCase_ ):
if col == row:
SCREAMING_SNAKE_CASE__ = table[row][col]
elif col == cols - 1:
SCREAMING_SNAKE_CASE__ = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
SCREAMING_SNAKE_CASE__ = (temp + val) / denom
new_val.append(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = new_val
return [float(UpperCamelCase_ ) for i in new_val]
def _lowercase ( UpperCamelCase_ ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = table.shape
SCREAMING_SNAKE_CASE__ = True
for i in range(0 , UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 400 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
UpperCAmelCase__ : List[str] = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def _lowercase ( __SCREAMING_SNAKE_CASE=None ) -> List[str]:
if subparsers is not None:
UpperCamelCase__ : Any = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ : List[str] = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ : Tuple = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=__SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=__SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ : Any = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=__SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
return parser
def _lowercase ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCamelCase__ : Optional[int] = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(__SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : str = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ : Optional[int] = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ : Optional[int] = defaults.commands
if not args.tpu_name:
UpperCamelCase__ : Any = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ : List[Any] = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ : Tuple = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ : Tuple = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Dict = F"""accelerate=={args.accelerate_version}"""
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ : Tuple = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Optional[Any] = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ : List[Any] = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F"""pip install {args.accelerate_version}"""]
new_cmd += args.command
UpperCamelCase__ : str = '; '.join(__SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ : Optional[Any] = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {" ".join(__SCREAMING_SNAKE_CASE )}""" )
return
subprocess.run(__SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def _lowercase ( ) -> int:
UpperCamelCase__ : Tuple = tpu_command_parser()
UpperCamelCase__ : Optional[int] = parser.parse_args()
tpu_command_launcher(__SCREAMING_SNAKE_CASE )
| 410 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ : Optional[int] = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
'''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMSNModel''',
'''ViTMSNForImageClassification''',
'''ViTMSNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 410 | 1 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_A : str = None
_A : Union[str, Any] = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_A : Dict = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __SCREAMING_SNAKE_CASE :
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : str = None
# Automatically constructed
_UpperCAmelCase : List[str] = "PIL.Image.Image"
_UpperCAmelCase : List[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
_UpperCAmelCase : Any = field(default="Image" ,init=_UpperCAmelCase ,repr=_UpperCAmelCase )
def __call__( self : str ) ->int:
return self.pa_type
def __lowerCamelCase ( self : List[str] , A : List[Any] ) ->Optional[Any]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(lowercase__ , lowercase__ ):
lowerCamelCase__ : List[Any] = np.array(lowercase__ )
if isinstance(lowercase__ , lowercase__ ):
return {"path": value, "bytes": None}
elif isinstance(lowercase__ , lowercase__ ):
return {"path": None, "bytes": value}
elif isinstance(lowercase__ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowercase__ )
elif isinstance(lowercase__ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowercase__ )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def __lowerCamelCase ( self : Dict , A : Dict , A : Optional[int]=None ) ->Any:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
lowerCamelCase__ : Optional[int] = {}
lowerCamelCase__ : Tuple = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(F"An image should have one of 'path' or 'bytes' but both are None in {value}." )
else:
if is_local_path(lowercase__ ):
lowerCamelCase__ : Union[str, Any] = PIL.Image.open(lowercase__ )
else:
lowerCamelCase__ : Optional[int] = path.split('''::''' )[-1]
try:
lowerCamelCase__ : Dict = string_to_dict(lowercase__ , config.HUB_DATASETS_URL )["repo_id"]
lowerCamelCase__ : List[Any] = token_per_repo_id.get(lowercase__ )
except ValueError:
lowerCamelCase__ : List[Any] = None
with xopen(lowercase__ , '''rb''' , use_auth_token=lowercase__ ) as f:
lowerCamelCase__ : str = BytesIO(f.read() )
lowerCamelCase__ : str = PIL.Image.open(bytes_ )
else:
lowerCamelCase__ : int = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __lowerCamelCase ( self : int ) ->str:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def __lowerCamelCase ( self : Optional[int] , A : List[Any] ) ->Dict:
if pa.types.is_string(storage.type ):
lowerCamelCase__ : Tuple = pa.array([None] * len(lowercase__ ) , type=pa.binary() )
lowerCamelCase__ : List[str] = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowerCamelCase__ : Any = pa.array([None] * len(lowercase__ ) , type=pa.string() )
lowerCamelCase__ : Any = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
lowerCamelCase__ : Tuple = storage.field('''bytes''' )
else:
lowerCamelCase__ : str = pa.array([None] * len(lowercase__ ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
lowerCamelCase__ : List[str] = storage.field('''path''' )
else:
lowerCamelCase__ : List[Any] = pa.array([None] * len(lowercase__ ) , type=pa.string() )
lowerCamelCase__ : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowerCamelCase__ : Tuple = pa.array(
[encode_np_array(np.array(lowercase__ ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowerCamelCase__ : Tuple = pa.array([None] * len(lowercase__ ) , type=pa.string() )
lowerCamelCase__ : Optional[int] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(lowercase__ , self.pa_type )
def __lowerCamelCase ( self : Any , A : Dict ) ->List[Any]:
@no_op_if_value_is_null
def path_to_bytes(A : Any ):
with xopen(lowercase__ , '''rb''' ) as f:
lowerCamelCase__ : List[Any] = f.read()
return bytes_
lowerCamelCase__ : str = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowerCamelCase__ : int = pa.array(
[os.path.basename(lowercase__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
lowerCamelCase__ : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(lowercase__ , self.pa_type )
def _a ( ) -> List[str]:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowerCamelCase__ : int = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _a ( UpperCAmelCase ) -> bytes:
"""simple docstring"""
lowerCamelCase__ : int = BytesIO()
if image.format in list_image_compression_formats():
lowerCamelCase__ : List[Any] = image.format
else:
lowerCamelCase__ : Dict = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(SCREAMING_SNAKE_CASE_ , format=SCREAMING_SNAKE_CASE_ )
return buffer.getvalue()
def _a ( UpperCAmelCase ) -> dict:
"""simple docstring"""
if hasattr(SCREAMING_SNAKE_CASE_ , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(SCREAMING_SNAKE_CASE_ )}
def _a ( UpperCAmelCase ) -> dict:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
lowerCamelCase__ : Optional[int] = array.dtype
lowerCamelCase__ : Dict = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
lowerCamelCase__ : List[Any] = dtype.kind
lowerCamelCase__ : Union[str, Any] = dtype.itemsize
lowerCamelCase__ : List[str] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowerCamelCase__ : Optional[Any] = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
if dtype is not dest_dtype:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowerCamelCase__ : str = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
lowerCamelCase__ : str = dtype_byteorder + dtype_kind + str(SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ : List[Any] = np.dtype(SCREAMING_SNAKE_CASE_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" )
lowerCamelCase__ : Union[str, Any] = PIL.Image.fromarray(array.astype(SCREAMING_SNAKE_CASE_ ) )
return {"path": None, "bytes": image_to_bytes(SCREAMING_SNAKE_CASE_ )}
def _a ( UpperCAmelCase ) -> List[dict]:
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
lowerCamelCase__ : List[str] = first_non_null_value(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
lowerCamelCase__ : Tuple = no_op_if_value_is_null(SCREAMING_SNAKE_CASE_ )
return [obj_to_image_dict_func(SCREAMING_SNAKE_CASE_ ) for obj in objs]
elif isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ):
lowerCamelCase__ : int = no_op_if_value_is_null(SCREAMING_SNAKE_CASE_ )
return [obj_to_image_dict_func(SCREAMING_SNAKE_CASE_ ) for obj in objs]
else:
return objs
else:
return objs
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_A : Any = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : int = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : int = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
_A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 130 | 0 |
'''simple docstring'''
from math import pow
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , ):
'''simple docstring'''
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
_lowerCAmelCase = int(pow(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
_lowerCAmelCase , _lowerCAmelCase = backtrack(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , current_number + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
_lowerCAmelCase , _lowerCAmelCase = backtrack(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , current_number + 1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return current_sum, solutions_count
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
raise ValueError(
"Invalid input\n"
"needed_sum must be between 1 and 1000, power between 2 and 10." )
return backtrack(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_snake_case = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__magic_name__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__magic_name__ , metadata={"help": "The column name of the images in the files."} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(default=__magic_name__ , metadata={"help": "A folder containing the training data."} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(default=__magic_name__ , metadata={"help": "A folder containing the validation data."} )
SCREAMING_SNAKE_CASE_ : Optional[float] =field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
SCREAMING_SNAKE_CASE_ : Optional[int] =field(
default=__magic_name__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] =field(
default=__magic_name__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __lowerCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = {}
if self.train_dir is not None:
UpperCamelCase = self.train_dir
if self.validation_dir is not None:
UpperCamelCase = self.validation_dir
UpperCamelCase = data_files if data_files else None
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =field(
default=__magic_name__ , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__magic_name__ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__magic_name__ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__magic_name__ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
SCREAMING_SNAKE_CASE_ : str =field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
SCREAMING_SNAKE_CASE_ : str =field(default=__magic_name__ , metadata={"help": "Name or path of preprocessor config."} )
SCREAMING_SNAKE_CASE_ : bool =field(
default=__magic_name__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
SCREAMING_SNAKE_CASE_ : float =field(
default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
SCREAMING_SNAKE_CASE_ : bool =field(
default=__magic_name__ , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : float =field(
default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def __lowerCamelCase ( _lowercase ) -> Optional[int]:
UpperCamelCase = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def __lowerCamelCase ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase , UpperCamelCase , UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
UpperCamelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
UpperCamelCase = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _lowercase ) and data_args.train_val_split > 0.0:
UpperCamelCase = ds['train'].train_test_split(data_args.train_val_split )
UpperCamelCase = split['train']
UpperCamelCase = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.config_name , **_lowercase )
elif model_args.model_name_or_path:
UpperCamelCase = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_lowercase )
else:
UpperCamelCase = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_lowercase )
elif model_args.model_name_or_path:
UpperCamelCase = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_lowercase )
else:
UpperCamelCase = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
UpperCamelCase = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
UpperCamelCase = ViTMAEForPreTraining(_lowercase )
if training_args.do_train:
UpperCamelCase = ds['train'].column_names
else:
UpperCamelCase = ds['validation'].column_names
if data_args.image_column_name is not None:
UpperCamelCase = data_args.image_column_name
elif "image" in column_names:
UpperCamelCase = 'image'
elif "img" in column_names:
UpperCamelCase = 'img'
else:
UpperCamelCase = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
UpperCamelCase = image_processor.size['shortest_edge']
else:
UpperCamelCase = (image_processor.size['height'], image_processor.size['width'])
UpperCamelCase = Compose(
[
Lambda(lambda _lowercase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_lowercase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_lowercase ):
UpperCamelCase = [transforms(_lowercase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
UpperCamelCase = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_lowercase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
UpperCamelCase = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_lowercase )
# Compute absolute learning rate
UpperCamelCase = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
UpperCamelCase = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
UpperCamelCase = Trainer(
model=_lowercase , args=_lowercase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , )
# Training
if training_args.do_train:
UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase = last_checkpoint
UpperCamelCase = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase = trainer.evaluate()
trainer.log_metrics('eval' , _lowercase )
trainer.save_metrics('eval' , _lowercase )
# Write model card and (optionally) push to hub
UpperCamelCase = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
def __lowerCamelCase ( _lowercase ) -> int:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 282 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ) ->Union[str, Any]:
if attention_mask is None:
UpperCAmelCase = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __lowercase :
UpperCamelCase = OPTConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any]=1_3 , __lowerCamelCase : int=7 , __lowerCamelCase : List[str]=True , __lowerCamelCase : Optional[Any]=False , __lowerCamelCase : Any=9_9 , __lowerCamelCase : Optional[int]=1_6 , __lowerCamelCase : List[str]=2 , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : str=4 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=0.1 , __lowerCamelCase : Any=2_0 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Dict=1 , __lowerCamelCase : List[Any]=0 , __lowerCamelCase : Union[str, Any]=1_6 , __lowerCamelCase : Union[str, Any]=1_6 , ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = bos_token_id
UpperCAmelCase = embed_dim
UpperCAmelCase = word_embed_proj_dim
UpperCAmelCase = False
def _lowercase ( self : Any ) -> str:
"""simple docstring"""
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__lowerCamelCase , **self.config_updates , )
UpperCAmelCase = prepare_opt_inputs_dict(__lowerCamelCase , __lowerCamelCase )
return config, inputs_dict
def _lowercase ( self : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Dict ) -> int:
"""simple docstring"""
UpperCAmelCase = TFOPTModel(config=__lowerCamelCase )
UpperCAmelCase = inputs_dict["""input_ids"""]
UpperCAmelCase = input_ids[:1, :]
UpperCAmelCase = inputs_dict["""attention_mask"""][:1, :]
UpperCAmelCase = 1
# first forward pass
UpperCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , use_cache=__lowerCamelCase )
UpperCAmelCase , UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase )[0]
UpperCAmelCase = model(__lowerCamelCase , attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__lowerCamelCase , __lowerCamelCase , rtol=1e-3 )
@require_tf
class __lowercase ( __snake_case , __snake_case , unittest.TestCase ):
UpperCamelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
UpperCamelCase = (TFOPTForCausalLM,) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = 10
def _lowercase ( self : str ) -> Dict:
"""simple docstring"""
UpperCAmelCase = TFOPTModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=__lowerCamelCase )
def _lowercase ( self : Dict ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase )
def _lowercase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__lowerCamelCase : int , __lowerCamelCase : Tuple ):
if hasattr(__lowerCamelCase , """weight""" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(__lowerCamelCase , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
UpperCAmelCase = model_class(config=__lowerCamelCase )
UpperCAmelCase = _get_word_embedding_weight(__lowerCamelCase , model.get_input_embeddings() )
UpperCAmelCase = _get_word_embedding_weight(__lowerCamelCase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(__lowerCamelCase )
UpperCAmelCase = _get_word_embedding_weight(__lowerCamelCase , model.get_input_embeddings() )
UpperCAmelCase = _get_word_embedding_weight(__lowerCamelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
UpperCAmelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , __lowerCamelCase )
# check that weights remain the same after resizing
UpperCAmelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
UpperCAmelCase = False
self.assertTrue(__lowerCamelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , __lowerCamelCase )
UpperCAmelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
UpperCAmelCase = False
self.assertTrue(__lowerCamelCase )
def _UpperCamelCase ( lowerCAmelCase_ ) ->Optional[Any]:
return tf.constant(lowerCAmelCase_ , dtype=tf.intaa )
@require_tf
class __lowercase ( unittest.TestCase ):
UpperCamelCase = 99
def _lowercase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
UpperCAmelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
UpperCAmelCase = input_ids.shape[0]
UpperCAmelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __lowercase ( unittest.TestCase ):
@slow
def _lowercase ( self : Tuple ) -> Dict:
"""simple docstring"""
UpperCAmelCase = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
UpperCAmelCase = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
UpperCAmelCase = tf.not_equal(__lowerCamelCase , model.config.pad_token_id )
with tf.GradientTape():
UpperCAmelCase = model(input_ids=__lowerCamelCase , attention_mask=__lowerCamelCase ).last_hidden_state
UpperCAmelCase = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , __lowerCamelCase )
UpperCAmelCase = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , __lowerCamelCase , atol=4e-3 ) )
UpperCAmelCase = tf.function(__lowerCamelCase , jit_compile=__lowerCamelCase )
UpperCAmelCase = xla_generate(__lowerCamelCase , __lowerCamelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , __lowerCamelCase , atol=4e-2 ) )
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
def _lowercase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
super().setUp()
UpperCAmelCase = """facebook/opt-350m"""
def _lowercase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
UpperCAmelCase = GPTaTokenizer.from_pretrained(self.path_model )
UpperCAmelCase = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
UpperCAmelCase = tokenizer(__lowerCamelCase , return_tensors="""tf""" , padding=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
UpperCAmelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
UpperCAmelCase = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) )
UpperCAmelCase = tf.function(__lowerCamelCase , jit_compile=__lowerCamelCase )
UpperCAmelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) )
@require_tf
@slow
class __lowercase ( unittest.TestCase ):
@property
def _lowercase ( self : Any ) -> str:
"""simple docstring"""
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowercase ( self : List[str] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = """facebook/opt-125m"""
UpperCAmelCase = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
UpperCAmelCase = []
UpperCAmelCase = GPTaTokenizer.from_pretrained(__lowerCamelCase )
UpperCAmelCase = TFOPTForCausalLM.from_pretrained(__lowerCamelCase )
for prompt in self.prompts:
UpperCAmelCase = tokenizer(__lowerCamelCase , return_tensors="""tf""" ).input_ids
UpperCAmelCase = model.generate(__lowerCamelCase , max_length=1_0 )
UpperCAmelCase = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def _lowercase ( self : Tuple ) -> str:
"""simple docstring"""
UpperCAmelCase = """facebook/opt-350m"""
UpperCAmelCase = GPTaTokenizer.from_pretrained(__lowerCamelCase )
UpperCAmelCase = TFOPTForCausalLM.from_pretrained(__lowerCamelCase )
UpperCAmelCase = """left"""
# use different length sentences to test batching
UpperCAmelCase = [
"""Hello, my dog is a little""",
"""Today, I""",
]
UpperCAmelCase = tokenizer(__lowerCamelCase , return_tensors="""tf""" , padding=__lowerCamelCase )
UpperCAmelCase = inputs["""input_ids"""]
UpperCAmelCase = model.generate(input_ids=__lowerCamelCase , attention_mask=inputs["""attention_mask"""] )
UpperCAmelCase = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
UpperCAmelCase = model.generate(input_ids=__lowerCamelCase )
UpperCAmelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
UpperCAmelCase = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
UpperCAmelCase = model.generate(input_ids=__lowerCamelCase , max_length=model.config.max_length - num_paddings )
UpperCAmelCase = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
UpperCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__lowerCamelCase )
UpperCAmelCase = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(__lowerCamelCase , [non_padded_sentence, padded_sentence] )
def _lowercase ( self : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = """facebook/opt-350m"""
UpperCAmelCase = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
UpperCAmelCase = []
UpperCAmelCase = GPTaTokenizer.from_pretrained(__lowerCamelCase )
UpperCAmelCase = TFOPTForCausalLM.from_pretrained(__lowerCamelCase )
for prompt in self.prompts:
UpperCAmelCase = tokenizer(__lowerCamelCase , return_tensors="""tf""" ).input_ids
UpperCAmelCase = model.generate(__lowerCamelCase , max_length=1_0 )
UpperCAmelCase = tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
| 707 |
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__a = """\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
"""
__a = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""
__a = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for 'record': list of question-answer dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'prediction_text': the predicted answer text
- for 'multirc': list of question-answer dictionaries with the following keys:
- 'idx': index of the question-answer pair as specified by the dataset
- 'prediction': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for 'record': list of question-answers dictionaries with the following keys:
- 'idx': index of the question as specified by the dataset
- 'answers': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for 'record':
- 'exact_match': Exact match between answer and gold answer
- 'f1': F1 score
- for 'multirc':
- 'exact_match': Exact match between answer and gold answer
- 'f1_m': Per-question macro-F1 score
- 'f1_a': Average F1 score over all answers
- for 'axb':
'matthews_correlation': Matthew Correlation
- for 'cb':
- 'accuracy': Accuracy
- 'f1': F1 score
- for all others:
- 'accuracy': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'record')
>>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
>>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
>>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}
>>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->List[str]:
return float((preds == labels).mean() )
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="binary" ) ->Union[str, Any]:
UpperCAmelCase = simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = float(fa_score(y_true=lowerCAmelCase_ , y_pred=lowerCAmelCase_ , average=lowerCAmelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->List[Any]:
UpperCAmelCase = {}
for id_pred, label in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
UpperCAmelCase = F"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
UpperCAmelCase = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
UpperCAmelCase = [(pred, label)]
UpperCAmelCase , UpperCAmelCase = [], []
for question, preds_labels in question_map.items():
UpperCAmelCase , UpperCAmelCase = zip(*lowerCAmelCase_ )
UpperCAmelCase = fa_score(y_true=lowerCAmelCase_ , y_pred=lowerCAmelCase_ , average="""macro""" )
fas.append(lowerCAmelCase_ )
UpperCAmelCase = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCAmelCase_ ) )
ems.append(lowerCAmelCase_ )
UpperCAmelCase = float(sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ ) )
UpperCAmelCase = sum(lowerCAmelCase_ ) / len(lowerCAmelCase_ )
UpperCAmelCase = float(fa_score(y_true=lowerCAmelCase_ , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def _lowercase ( self : int ) -> Any:
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def _lowercase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def _lowercase ( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__lowerCamelCase , __lowerCamelCase )}
elif self.config_name == "cb":
return acc_and_fa(__lowerCamelCase , __lowerCamelCase , fa_avg="""macro""" )
elif self.config_name == "record":
UpperCAmelCase = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
UpperCAmelCase = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(__lowerCamelCase , __lowerCamelCase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__lowerCamelCase , __lowerCamelCase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__lowerCamelCase , __lowerCamelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 627 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : list ) -> list:
__A : Dict = False
while is_sorted is False: # Until all the indices are traversed keep looping
__A : int = True
for i in range(0 , len(__snake_case ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__A ,__A : List[Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
__A : int = False
for i in range(1 , len(__snake_case ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__A ,__A : Optional[int] = input_list[i + 1], input_list[i]
# swapping if elements not in order
__A : str = False
return input_list
if __name__ == "__main__":
print('''Enter list to be sorted''')
lowercase__ : Dict = [int(x) for x in input().split()]
# inputing elements of the list in one line
lowercase__ : Union[str, Any] = odd_even_sort(input_list)
print('''The sorted list is''')
print(sorted_list) | 8 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = 'swinv2'
UpperCamelCase_ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[Any] ,lowerCAmelCase__ : Optional[int]=2_24 ,lowerCAmelCase__ : Dict=4 ,lowerCAmelCase__ : Dict=3 ,lowerCAmelCase__ : List[Any]=96 ,lowerCAmelCase__ : Optional[Any]=[2, 2, 6, 2] ,lowerCAmelCase__ : Optional[Any]=[3, 6, 12, 24] ,lowerCAmelCase__ : Optional[int]=7 ,lowerCAmelCase__ : Dict=4.0 ,lowerCAmelCase__ : Dict=True ,lowerCAmelCase__ : str=0.0 ,lowerCAmelCase__ : Tuple=0.0 ,lowerCAmelCase__ : str=0.1 ,lowerCAmelCase__ : List[str]="gelu" ,lowerCAmelCase__ : Union[str, Any]=False ,lowerCAmelCase__ : Dict=0.02 ,lowerCAmelCase__ : int=1e-5 ,lowerCAmelCase__ : List[str]=32 ,**lowerCAmelCase__ : Tuple ,) -> List[str]:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = image_size
lowerCAmelCase_ : List[Any] = patch_size
lowerCAmelCase_ : Dict = num_channels
lowerCAmelCase_ : Optional[int] = embed_dim
lowerCAmelCase_ : Optional[Any] = depths
lowerCAmelCase_ : Any = len(lowerCAmelCase__ )
lowerCAmelCase_ : str = num_heads
lowerCAmelCase_ : List[str] = window_size
lowerCAmelCase_ : List[str] = mlp_ratio
lowerCAmelCase_ : Dict = qkv_bias
lowerCAmelCase_ : str = hidden_dropout_prob
lowerCAmelCase_ : str = attention_probs_dropout_prob
lowerCAmelCase_ : Union[str, Any] = drop_path_rate
lowerCAmelCase_ : List[Any] = hidden_act
lowerCAmelCase_ : Any = use_absolute_embeddings
lowerCAmelCase_ : List[str] = layer_norm_eps
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : Union[str, Any] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowerCAmelCase_ : Tuple = int(embed_dim * 2 ** (len(lowerCAmelCase__ ) - 1) )
lowerCAmelCase_ : str = (0, 0, 0, 0)
| 659 | 0 |
'''simple docstring'''
import pprint
import requests
SCREAMING_SNAKE_CASE : Dict = "https://zenquotes.io/api"
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/today' ).json()
def _UpperCamelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + '/random' ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Union[str, Any] = random_quotes()
pprint.pprint(response)
| 718 |
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class snake_case :
"""simple docstring"""
_a = 42
_a = None
_a = None
def _UpperCamelCase ( lowerCAmelCase__: TreeNode | None ) -> bool:
# Validation
def is_valid_tree(lowerCAmelCase__: TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(lowerCAmelCase__ ):
raise ValueError(
'Each node should be type of TreeNode and data should be float.' )
def is_binary_search_tree_recursive_check(
lowerCAmelCase__: TreeNode | None ,lowerCAmelCase__: float ,lowerCAmelCase__: float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left ,lowerCAmelCase__ ,node.data )
and is_binary_search_tree_recursive_check(
node.right ,node.data ,lowerCAmelCase__ )
)
return is_binary_search_tree_recursive_check(lowerCAmelCase__ ,-float('inf' ) ,float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 238 | 0 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCAmelCase_ ( __A, __A=1 ) -> int:
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def lowerCAmelCase_ ( __A, __A=0 ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = []
for old_item in old_list:
UpperCAmelCase__ = old_item.replace("in_layers.0", "norm1" )
UpperCAmelCase__ = new_item.replace("in_layers.2", "conv1" )
UpperCAmelCase__ = new_item.replace("out_layers.0", "norm2" )
UpperCAmelCase__ = new_item.replace("out_layers.3", "conv2" )
UpperCAmelCase__ = new_item.replace("emb_layers.1", "time_emb_proj" )
UpperCAmelCase__ = new_item.replace("skip_connection", "conv_shortcut" )
UpperCAmelCase__ = shave_segments(__A, n_shave_prefix_segments=__A )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def lowerCAmelCase_ ( __A, __A=0 ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ = []
for old_item in old_list:
UpperCAmelCase__ = old_item
UpperCAmelCase__ = new_item.replace("norm.weight", "group_norm.weight" )
UpperCAmelCase__ = new_item.replace("norm.bias", "group_norm.bias" )
UpperCAmelCase__ = new_item.replace("proj_out.weight", "proj_attn.weight" )
UpperCAmelCase__ = new_item.replace("proj_out.bias", "proj_attn.bias" )
UpperCAmelCase__ = shave_segments(__A, n_shave_prefix_segments=__A )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def lowerCAmelCase_ ( __A, __A, __A, __A=None, __A=None, __A=None ) -> Any:
'''simple docstring'''
assert isinstance(__A, __A ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCAmelCase__ = old_checkpoint[path]
UpperCAmelCase__ = old_tensor.shape[0] // 3
UpperCAmelCase__ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCAmelCase__ = old_tensor.shape[0] // config["num_head_channels"] // 3
UpperCAmelCase__ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = old_tensor.split(channels // num_heads, dim=1 )
UpperCAmelCase__ = query.reshape(__A )
UpperCAmelCase__ = key.reshape(__A )
UpperCAmelCase__ = value.reshape(__A )
for path in paths:
UpperCAmelCase__ = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCAmelCase__ = new_path.replace("middle_block.0", "mid_block.resnets.0" )
UpperCAmelCase__ = new_path.replace("middle_block.1", "mid_block.attentions.0" )
UpperCAmelCase__ = new_path.replace("middle_block.2", "mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCAmelCase__ = new_path.replace(replacement["old"], replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCAmelCase__ = old_checkpoint[path["old"]][:, :, 0]
else:
UpperCAmelCase__ = old_checkpoint[path["old"]]
def lowerCAmelCase_ ( __A, __A ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ = {}
UpperCAmelCase__ = checkpoint["time_embed.0.weight"]
UpperCAmelCase__ = checkpoint["time_embed.0.bias"]
UpperCAmelCase__ = checkpoint["time_embed.2.weight"]
UpperCAmelCase__ = checkpoint["time_embed.2.bias"]
UpperCAmelCase__ = checkpoint["input_blocks.0.0.weight"]
UpperCAmelCase__ = checkpoint["input_blocks.0.0.bias"]
UpperCAmelCase__ = checkpoint["out.0.weight"]
UpperCAmelCase__ = checkpoint["out.0.bias"]
UpperCAmelCase__ = checkpoint["out.2.weight"]
UpperCAmelCase__ = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
UpperCAmelCase__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
UpperCAmelCase__ = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(__A )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
UpperCAmelCase__ = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(__A )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase__ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
UpperCAmelCase__ = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(__A )
}
for i in range(1, __A ):
UpperCAmelCase__ = (i - 1) // (config["num_res_blocks"] + 1)
UpperCAmelCase__ = (i - 1) % (config["num_res_blocks"] + 1)
UpperCAmelCase__ = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
UpperCAmelCase__ = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCAmelCase__ = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
UpperCAmelCase__ = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCAmelCase__ = renew_resnet_paths(__A )
UpperCAmelCase__ = {"old": f"""input_blocks.{i}.0""", "new": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCAmelCase__ = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
assign_to_checkpoint(
__A, __A, __A, additional_replacements=[meta_path, resnet_op], config=__A )
if len(__A ):
UpperCAmelCase__ = renew_attention_paths(__A )
UpperCAmelCase__ = {
"old": f"""input_blocks.{i}.1""",
"new": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase__ = {
f"""input_blocks.{i}.1.qkv.bias""": {
"key": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
"key": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
__A, __A, __A, additional_replacements=[meta_path], attention_paths_to_split=__A, config=__A, )
UpperCAmelCase__ = middle_blocks[0]
UpperCAmelCase__ = middle_blocks[1]
UpperCAmelCase__ = middle_blocks[2]
UpperCAmelCase__ = renew_resnet_paths(__A )
assign_to_checkpoint(__A, __A, __A, config=__A )
UpperCAmelCase__ = renew_resnet_paths(__A )
assign_to_checkpoint(__A, __A, __A, config=__A )
UpperCAmelCase__ = renew_attention_paths(__A )
UpperCAmelCase__ = {
"middle_block.1.qkv.bias": {
"key": "mid_block.attentions.0.key.bias",
"query": "mid_block.attentions.0.query.bias",
"value": "mid_block.attentions.0.value.bias",
},
"middle_block.1.qkv.weight": {
"key": "mid_block.attentions.0.key.weight",
"query": "mid_block.attentions.0.query.weight",
"value": "mid_block.attentions.0.value.weight",
},
}
assign_to_checkpoint(
__A, __A, __A, attention_paths_to_split=__A, config=__A )
for i in range(__A ):
UpperCAmelCase__ = i // (config["num_res_blocks"] + 1)
UpperCAmelCase__ = i % (config["num_res_blocks"] + 1)
UpperCAmelCase__ = [shave_segments(__A, 2 ) for name in output_blocks[i]]
UpperCAmelCase__ = {}
for layer in output_block_layers:
UpperCAmelCase__ , UpperCAmelCase__ = layer.split("." )[0], shave_segments(__A, 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__A )
else:
UpperCAmelCase__ = [layer_name]
if len(__A ) > 1:
UpperCAmelCase__ = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
UpperCAmelCase__ = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
UpperCAmelCase__ = renew_resnet_paths(__A )
UpperCAmelCase__ = renew_resnet_paths(__A )
UpperCAmelCase__ = {"old": f"""output_blocks.{i}.0""", "new": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(__A, __A, __A, additional_replacements=[meta_path], config=__A )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase__ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] )
UpperCAmelCase__ = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCAmelCase__ = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(__A ) == 2:
UpperCAmelCase__ = []
if len(__A ):
UpperCAmelCase__ = renew_attention_paths(__A )
UpperCAmelCase__ = {
"old": f"""output_blocks.{i}.1""",
"new": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase__ = {
f"""output_blocks.{i}.1.qkv.bias""": {
"key": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"query": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"value": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
"key": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"query": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"value": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
__A, __A, __A, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None, config=__A, )
else:
UpperCAmelCase__ = renew_resnet_paths(__A, n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase__ = ".".join(["output_blocks", str(__A ), path["old"]] )
UpperCAmelCase__ = ".".join(["up_blocks", str(__A ), "resnets", str(__A ), path["new"]] )
UpperCAmelCase__ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
UpperCamelCase__ = json.loads(f.read())
UpperCamelCase__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
UpperCamelCase__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
UpperCamelCase__ = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
UpperCamelCase__ = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
UpperCamelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 486 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase_ ( __A, __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = b.T
UpperCAmelCase__ = np.sum(np.square(__A ), axis=1 )
UpperCAmelCase__ = np.sum(np.square(__A ), axis=0 )
UpperCAmelCase__ = np.matmul(__A, __A )
UpperCAmelCase__ = aa[:, None] - 2 * ab + ba[None, :]
return d
def lowerCAmelCase_ ( __A, __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = x.reshape(-1, 3 )
UpperCAmelCase__ = squared_euclidean_distance(__A, __A )
return np.argmin(__A, axis=1 )
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Optional[Any] = ['pixel_values']
def __init__(self : Optional[int] , __UpperCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
UpperCAmelCase__ = size if size is not None else {"height": 2_5_6, "width": 2_5_6}
UpperCAmelCase__ = get_size_dict(__UpperCAmelCase )
UpperCAmelCase__ = np.array(__UpperCAmelCase ) if clusters is not None else None
UpperCAmelCase__ = do_resize
UpperCAmelCase__ = size
UpperCAmelCase__ = resample
UpperCAmelCase__ = do_normalize
UpperCAmelCase__ = do_color_quantize
def lowercase_ (self : int , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Tuple , ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase__ = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
__UpperCAmelCase , size=(size["height"], size["width"]) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def lowercase_ (self : Tuple , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
"""simple docstring"""
UpperCAmelCase__ = rescale(image=__UpperCAmelCase , scale=1 / 127.5 , data_format=__UpperCAmelCase )
UpperCAmelCase__ = image - 1
return image
def lowercase_ (self : Optional[int] , __UpperCAmelCase : ImageInput , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : PILImageResampling = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **__UpperCAmelCase : Optional[int] , ) -> PIL.Image.Image:
"""simple docstring"""
UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ = size if size is not None else self.size
UpperCAmelCase__ = get_size_dict(__UpperCAmelCase )
UpperCAmelCase__ = resample if resample is not None else self.resample
UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
UpperCAmelCase__ = clusters if clusters is not None else self.clusters
UpperCAmelCase__ = np.array(__UpperCAmelCase )
UpperCAmelCase__ = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase__ = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase__ = [self.normalize(image=__UpperCAmelCase ) for image in images]
if do_color_quantize:
UpperCAmelCase__ = [to_channel_dimension_format(__UpperCAmelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
UpperCAmelCase__ = np.array(__UpperCAmelCase )
UpperCAmelCase__ = color_quantize(__UpperCAmelCase , __UpperCAmelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
UpperCAmelCase__ = images.shape[0]
UpperCAmelCase__ = images.reshape(__UpperCAmelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
UpperCAmelCase__ = list(__UpperCAmelCase )
else:
UpperCAmelCase__ = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
UpperCAmelCase__ = {"input_ids": images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 486 | 1 |
"""simple docstring"""
def UpperCAmelCase_ ( __a : float ):
'''simple docstring'''
if edge <= 0 or not isinstance(__a , __a ):
raise ValueError('Length must be a positive.' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def UpperCAmelCase_ ( __a : float ):
'''simple docstring'''
if edge <= 0 or not isinstance(__a , __a ):
raise ValueError('Length must be a positive.' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 349 | 1 |
import argparse
import os
import re
import packaging.version
__lowerCamelCase : Dict = "examples/"
__lowerCamelCase : int = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCamelCase : Dict = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
__lowerCamelCase : int = "README.md"
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
with open(__snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace("VERSION" , __snake_case )
UpperCAmelCase = re_pattern.sub(__snake_case , __snake_case )
with open(__snake_case , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(__snake_case )
def lowerCamelCase_(lowerCamelCase_ ) -> str:
for folder, directories, fnames in os.walk(__snake_case ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(__snake_case , __snake_case ) , __snake_case , pattern="examples" )
def lowerCamelCase_(lowerCamelCase_ , lowerCamelCase_=False ) -> Dict:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__snake_case , __snake_case , __snake_case )
if not patch:
update_version_in_examples(__snake_case )
def lowerCamelCase_() -> Union[str, Any]:
UpperCAmelCase = "🤗 Transformers currently provides the following architectures"
UpperCAmelCase = "1. Want to contribute a new model?"
with open(__snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
UpperCAmelCase = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
index += 1
with open(__snake_case , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(__snake_case )
def lowerCamelCase_() -> str:
with open(REPLACE_FILES["init"] , "r" ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS["init"][0].search(__snake_case ).groups()[0]
return packaging.version.parse(__snake_case )
def lowerCamelCase_(lowerCamelCase_=False ) -> Optional[Any]:
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can\'t create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
UpperCAmelCase = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(F'Which version are you releasing? [{default_version}]' )
if len(__snake_case ) == 0:
UpperCAmelCase = default_version
print(F'Updating version to {version}.' )
global_version_update(__snake_case , patch=__snake_case )
def lowerCamelCase_() -> Any:
UpperCAmelCase = get_version()
UpperCAmelCase = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(F'Which version are we developing now? [{dev_version}]' )
if len(__snake_case ) == 0:
UpperCAmelCase = dev_version
print(F'Updating version to {version}.' )
global_version_update(__snake_case )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCamelCase : int = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 323 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=64 , __a=2 , __a=3 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=[1, 16, 4, 4] , __a=None , ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = image_size
_UpperCamelCase = patch_size
_UpperCamelCase = num_channels
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_UpperCamelCase = (self.image_size // 32) ** 2
_UpperCamelCase = num_patches + 1
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 16, 32],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__a , )
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModel(config=__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.type_sequence_label_size
_UpperCamelCase = ViTHybridForImageClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , labels=__a)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase__ = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = ViTHybridModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear))
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a)
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_UpperCamelCase = [F'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = ViTHybridModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def lowerCamelCase__ ( ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase( unittest.TestCase ):
@cached_property
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
__a)
_UpperCamelCase = self.default_image_processor
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''').to(__a)
# forward pass
with torch.no_grad():
_UpperCamelCase = model(**__a)
# verify the logits
_UpperCamelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __a)
_UpperCamelCase = torch.tensor([-1.9090, -0.4993, -0.2389]).to(__a)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
@slow
@require_accelerate
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''')
_UpperCamelCase = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''')
_UpperCamelCase = prepare_img()
_UpperCamelCase = image_processor(images=__a , return_tensors='''pt''')
_UpperCamelCase = model(**__a)
_UpperCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
_UpperCamelCase = logits.argmax(-1).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''')
| 19 | 0 |
'''simple docstring'''
import os
import sys
UpperCamelCase__ : Tuple = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCamelCase__ : Any = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def __UpperCamelCase( *_A : List[Any] , **_A : List[Any] ):
'''simple docstring'''
return AutoConfig.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __UpperCamelCase( *_A : Optional[Any] , **_A : Dict ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoModel.__doc__ )
def __UpperCamelCase( *_A : Union[str, Any] , **_A : str ):
'''simple docstring'''
return AutoModel.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __UpperCamelCase( *_A : List[str] , **_A : List[str] ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __UpperCamelCase( *_A : List[Any] , **_A : str ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __UpperCamelCase( *_A : List[Any] , **_A : Optional[Any] ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*_A , **_A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __UpperCamelCase( *_A : Tuple , **_A : Dict ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*_A , **_A )
| 496 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase__ : Union[str, Any] = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
UpperCamelCase__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 496 | 1 |
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError("""Input must be an integer""" )
if input_num <= 0:
raise ValueError("""Input must be positive""" )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase__ : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase__ : Optional[Any] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __lowercase :
__UpperCAmelCase = 42
__UpperCAmelCase = 42
class __lowercase :
def __init__( self , lowercase_) -> None:
__snake_case = None
for i in sorted(lowercase_ , reverse=lowercase_):
__snake_case = Node(lowercase_ , self.head)
def __iter__( self) -> Iterator[int]:
__snake_case = self.head
while node:
yield node.data
__snake_case = node.next_node
def __len__( self) -> int:
return sum(1 for _ in self)
def __str__( self) -> str:
return " -> ".join([str(lowercase_) for node in self])
def A ( snake_case__ : SortedLinkedList , snake_case__ : SortedLinkedList ) -> SortedLinkedList:
'''simple docstring'''
return SortedLinkedList(list(snake_case__ ) + list(snake_case__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ : Optional[int] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 313 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class A_ ( unittest.TestCase ):
def lowercase ( self : List[str] ):
_UpperCAmelCase = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
_UpperCAmelCase = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(snake_case_ ) , torch_builtin(snake_case_ ) ) )
self.assertFalse(torch.allclose(gelu_python(snake_case_ ) , gelu_new(snake_case_ ) ) )
def lowercase ( self : int ):
_UpperCAmelCase = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
_UpperCAmelCase = get_activation("gelu" )
_UpperCAmelCase = get_activation("gelu_10" )
_UpperCAmelCase = torch_builtin(snake_case_ )
_UpperCAmelCase = geluaa(snake_case_ )
_UpperCAmelCase = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(snake_case_ ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowercase ( self : Any ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(snake_case_ ):
get_activation("bogus" )
with self.assertRaises(snake_case_ ):
get_activation(snake_case_ )
def lowercase ( self : Dict ):
_UpperCAmelCase = get_activation("gelu" )
_UpperCAmelCase = 1
_UpperCAmelCase = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(snake_case_ ):
_UpperCAmelCase = acta.a
| 716 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__SCREAMING_SNAKE_CASE :List[str] = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :List[Any] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 119 | 0 |
from __future__ import annotations
def UpperCamelCase ( snake_case__ : list[int] ) -> list[int]: # This function is recursive
UpperCamelCase : List[Any] = len(snake_case__ )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
UpperCamelCase : Tuple = array[0]
UpperCamelCase : str = False
UpperCamelCase : Tuple = 1
UpperCamelCase : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
UpperCamelCase : Optional[int] = True
UpperCamelCase : Tuple = [element for element in array[i:] if element >= array[i]]
UpperCamelCase : str = longest_subsequence(snake_case__ )
if len(snake_case__ ) > len(snake_case__ ):
UpperCamelCase : Union[str, Any] = temp_array
else:
i += 1
UpperCamelCase : Any = [element for element in array[1:] if element >= pivot]
UpperCamelCase : Dict = [pivot, *longest_subsequence(snake_case__ )]
if len(snake_case__ ) > len(snake_case__ ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a_ = logging.get_logger(__name__)
class A_(SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self , *A , **A ):
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , A , )
super().__init__(*A , **A )
| 437 | 0 |
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
a : Optional[int] = logging.get_logger(__name__)
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any:
UpperCAmelCase__ = set()
UpperCAmelCase__ = []
def parse_line(_SCREAMING_SNAKE_CASE ):
for line in fp:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(_SCREAMING_SNAKE_CASE ) > 0:
UpperCAmelCase__ = """\n""".join(_SCREAMING_SNAKE_CASE )
# Only keep the warnings specified in `targets`
if any(F''': {x}: ''' in warning for x in targets ):
selected_warnings.add(_SCREAMING_SNAKE_CASE )
buffer.clear()
continue
else:
UpperCAmelCase__ = line.strip()
buffer.append(_SCREAMING_SNAKE_CASE )
if from_gh:
for filename in os.listdir(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase__ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
if filename != "warnings.txt":
continue
with open(_SCREAMING_SNAKE_CASE ) as fp:
parse_line(_SCREAMING_SNAKE_CASE )
else:
try:
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_SCREAMING_SNAKE_CASE ) as fp:
parse_line(_SCREAMING_SNAKE_CASE )
except Exception:
logger.warning(
F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
return selected_warnings
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
UpperCAmelCase__ = set()
UpperCAmelCase__ = [os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for p in os.listdir(_SCREAMING_SNAKE_CASE ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
return selected_warnings
if __name__ == "__main__":
def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->List[Any]:
return values.split(""",""" )
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
a : Union[str, Any] = parser.parse_args()
a : str = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
a : Optional[int] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
a : int = extract_warnings(args.output_dir, args.targets)
a : Union[str, Any] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 422 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a : str = logging.get_logger(__name__) # pylint: disable=invalid-name
a : Any = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=8 ) ->str:
UpperCAmelCase__ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase__ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class _UpperCamelCase ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase , __lowercase , ):
super().__init__()
self.register_modules(
unet=__lowercase , scheduler=__lowercase , movq=__lowercase , )
UpperCAmelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def A__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
if latents is None:
UpperCAmelCase__ = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
UpperCAmelCase__ = latents.to(__lowercase )
UpperCAmelCase__ = latents * scheduler.init_noise_sigma
return latents
def A__ ( self , __lowercase=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
UpperCAmelCase__ = torch.device(F'''cuda:{gpu_id}''' )
UpperCAmelCase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowercase , __lowercase )
def A__ ( self , __lowercase=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
UpperCAmelCase__ = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=__lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase__ , UpperCAmelCase__ = cpu_offload_with_hook(__lowercase , __lowercase , prev_module_hook=__lowercase )
# We'll offload the last model manually.
UpperCAmelCase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A__ ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowercase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowercase )
def __call__( self , __lowercase , __lowercase , __lowercase , __lowercase = 512 , __lowercase = 512 , __lowercase = 100 , __lowercase = 4.0 , __lowercase = 1 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , ):
UpperCAmelCase__ = self._execution_device
UpperCAmelCase__ = guidance_scale > 1.0
if isinstance(__lowercase , __lowercase ):
UpperCAmelCase__ = torch.cat(__lowercase , dim=0 )
if isinstance(__lowercase , __lowercase ):
UpperCAmelCase__ = torch.cat(__lowercase , dim=0 )
if isinstance(__lowercase , __lowercase ):
UpperCAmelCase__ = torch.cat(__lowercase , dim=0 )
UpperCAmelCase__ = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
UpperCAmelCase__ = image_embeds.repeat_interleave(__lowercase , dim=0 )
UpperCAmelCase__ = negative_image_embeds.repeat_interleave(__lowercase , dim=0 )
UpperCAmelCase__ = hint.repeat_interleave(__lowercase , dim=0 )
UpperCAmelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__lowercase )
UpperCAmelCase__ = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__lowercase )
self.scheduler.set_timesteps(__lowercase , device=__lowercase )
UpperCAmelCase__ = self.scheduler.timesteps
UpperCAmelCase__ = self.movq.config.latent_channels
UpperCAmelCase__ , UpperCAmelCase__ = downscale_height_and_width(__lowercase , __lowercase , self.movq_scale_factor )
# create initial latent
UpperCAmelCase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __lowercase , __lowercase , __lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__lowercase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ = {"""image_embeds""": image_embeds, """hint""": hint}
UpperCAmelCase__ = self.unet(
sample=__lowercase , timestep=__lowercase , encoder_hidden_states=__lowercase , added_cond_kwargs=__lowercase , return_dict=__lowercase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.chunk(2 )
UpperCAmelCase__ , UpperCAmelCase__ = variance_pred.chunk(2 )
UpperCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ = self.scheduler.step(
__lowercase , __lowercase , __lowercase , generator=__lowercase , )[0]
# post-processing
UpperCAmelCase__ = self.movq.decode(__lowercase , force_not_quantize=__lowercase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
UpperCAmelCase__ = image * 0.5 + 0.5
UpperCAmelCase__ = image.clamp(0 , 1 )
UpperCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(__lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowercase )
| 422 | 1 |
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
while second != 0:
UpperCamelCase : List[str] = first & second
first ^= second
UpperCamelCase : Optional[int] = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ : int = int(input("""Enter the first number: """).strip())
__magic_name__ : Tuple = int(input("""Enter the second number: """).strip())
print(f'''{add(first, second) = }''')
| 102 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__a :str = logging.get_logger(__name__)
__a :Any = Dict[str, Any]
__a :int = List[Prediction]
@add_end_docstrings(snake_case_ )
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , "vision" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __A ( self : str , **UpperCAmelCase : str ):
A_ = {}
if "threshold" in kwargs:
A_ = kwargs["threshold"]
return {}, {}, postprocess_kwargs
def __call__( self : Union[str, Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[Any] ):
return super().__call__(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : str , UpperCAmelCase : Any ):
A_ = load_image(UpperCAmelCase )
A_ = torch.IntTensor([[image.height, image.width]] )
A_ = self.image_processor(images=[image] , return_tensors="pt" )
if self.tokenizer is not None:
A_ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
A_ = target_size
return inputs
def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] ):
A_ = model_inputs.pop("target_size" )
A_ = self.model(**UpperCAmelCase )
A_ = outputs.__class__({"target_size": target_size, **outputs} )
if self.tokenizer is not None:
A_ = model_inputs["bbox"]
return model_outputs
def __A ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=0.9 ):
A_ = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
A_ , A_ = target_size[0].tolist()
def unnormalize(UpperCAmelCase : Any ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
A_ , A_ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
A_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
A_ = [unnormalize(UpperCAmelCase ) for bbox in model_outputs["bbox"].squeeze(0 )]
A_ = ["score", "label", "box"]
A_ = [dict(zip(UpperCAmelCase , UpperCAmelCase ) ) for vals in zip(scores.tolist() , UpperCAmelCase , UpperCAmelCase ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
A_ = self.image_processor.post_process_object_detection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
A_ = raw_annotations[0]
A_ = raw_annotation["scores"]
A_ = raw_annotation["labels"]
A_ = raw_annotation["boxes"]
A_ = scores.tolist()
A_ = [self.model.config.idalabel[label.item()] for label in labels]
A_ = [self._get_bounding_box(UpperCAmelCase ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
A_ = ["score", "label", "box"]
A_ = [
dict(zip(UpperCAmelCase , UpperCAmelCase ) )
for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
]
return annotation
def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 86 | 0 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class _a ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0_2 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ):
_lowercase = parent
_lowercase = batch_size
_lowercase = seq_length
_lowercase = is_training
_lowercase = use_input_mask
_lowercase = use_token_type_ids
_lowercase = use_labels
_lowercase = vocab_size
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = max_position_embeddings
_lowercase = type_vocab_size
_lowercase = type_sequence_label_size
_lowercase = initializer_range
_lowercase = num_labels
_lowercase = num_choices
_lowercase = relative_attention
_lowercase = position_biased_input
_lowercase = pos_att_type
_lowercase = scope
def __lowerCAmelCase ( self ):
_lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase = None
if self.use_input_mask:
_lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowercase = None
if self.use_token_type_ids:
_lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase = None
_lowercase = None
_lowercase = None
if self.use_labels:
_lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase = ids_tensor([self.batch_size] , self.num_choices )
_lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCAmelCase ( self , lowerCAmelCase_ ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_lowercase = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_lowercase = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase = self.num_labels
_lowercase = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase = self.num_labels
_lowercase = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self ):
_lowercase = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) = config_and_inputs
_lowercase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _a ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def __lowerCAmelCase ( self ):
_lowercase = DebertaVaModelTester(self )
_lowercase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def __lowerCAmelCase ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="Model not available yet" )
def __lowerCAmelCase ( self ):
pass
@slow
def __lowerCAmelCase ( self ):
_lowercase = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
_lowercase = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowercase = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_lowercase = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 706 | from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def __lowerCamelCase ( __a : List[Any] , __a : Dict , __a : Optional[Any]=None , __a : List[Any]=None ) -> str:
if attention_mask is None:
_lowercase =tf.cast(tf.math.not_equal(__a , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _a :
"""simple docstring"""
__SCREAMING_SNAKE_CASE = OPTConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = 'gelu'
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=99 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=4 , lowerCAmelCase_=4 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=20 , lowerCAmelCase_=2 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=16 , lowerCAmelCase_=16 , ):
_lowercase =parent
_lowercase =batch_size
_lowercase =seq_length
_lowercase =is_training
_lowercase =use_labels
_lowercase =vocab_size
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_act
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =max_position_embeddings
_lowercase =eos_token_id
_lowercase =pad_token_id
_lowercase =bos_token_id
_lowercase =embed_dim
_lowercase =word_embed_proj_dim
_lowercase =False
def __lowerCAmelCase ( self ):
_lowercase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowercase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowercase =tf.concat([input_ids, eos_tensor] , axis=1 )
_lowercase =self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase_ , **self.config_updates , )
_lowercase =prepare_opt_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def __lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
_lowercase =TFOPTModel(config=lowerCAmelCase_ )
_lowercase =inputs_dict["input_ids"]
_lowercase =input_ids[:1, :]
_lowercase =inputs_dict["attention_mask"][:1, :]
_lowercase =1
# first forward pass
_lowercase =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_lowercase , _lowercase =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowercase =ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowercase =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_lowercase =tf.concat([input_ids, next_tokens] , axis=-1 )
_lowercase =tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowercase =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
_lowercase =model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowercase =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowercase =output_from_no_past[:, -3:, random_slice_idx]
_lowercase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1e-3 )
@require_tf
class _a ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (TFOPTForCausalLM,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = 10
def __lowerCAmelCase ( self ):
_lowercase =TFOPTModelTester(self )
_lowercase =ConfigTester(self , config_class=lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
_lowercase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCAmelCase_ , lowerCAmelCase_ ):
if hasattr(lowerCAmelCase_ , "weight" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowerCAmelCase_ , "weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_lowercase =model_class(config=lowerCAmelCase_ )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase_ )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
_lowercase =_get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_lowercase =size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase_ )
# check that weights remain the same after resizing
_lowercase =True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowercase =False
self.assertTrue(lowerCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase_ )
_lowercase =True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowercase =False
self.assertTrue(lowerCAmelCase_ )
def __lowerCamelCase ( __a : Tuple ) -> Dict:
return tf.constant(__a , dtype=tf.intaa )
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 99
def __lowerCAmelCase ( self ):
_lowercase =tf.ones((4, 1) , dtype=tf.intaa ) * 2
_lowercase =tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_lowercase =input_ids.shape[0]
_lowercase =OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _a ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self ):
_lowercase =TFOPTModel.from_pretrained("facebook/opt-350m" )
_lowercase =_long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowercase =tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
_lowercase =model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
_lowercase =(1, 11, 512)
self.assertEqual(output.shape , lowerCAmelCase_ )
_lowercase =tf.constant(
[[-0.2_8_7_3, -1.9_2_1_8, -0.3_0_3_3], [-1.2_7_1_0, -0.1_3_3_8, -0.1_9_0_2], [0.4_0_9_5, 0.1_2_1_4, -1.3_1_2_1]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-3 ) )
_lowercase =tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
_lowercase =xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4e-2 ) )
@require_tf
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
super().setUp()
_lowercase ="facebook/opt-350m"
def __lowerCAmelCase ( self ):
_lowercase =TFOPTForCausalLM.from_pretrained(self.path_model )
_lowercase =GPTaTokenizer.from_pretrained(self.path_model )
_lowercase =[
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_lowercase =tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_lowercase =tf.constant(
[
[1.3_8_5_1, -1_3.8_9_2_3, -1_0.5_2_2_9, -1_0.7_5_3_3, -0.2_3_0_9, -1_0.2_3_8_4, -0.5_3_6_5, -9.0_9_4_7, -5.1_6_7_0],
[-4.7_0_7_3, -1_0.6_2_7_6, -3.9_4_1_5, -2_1.5_2_4_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2, -0.2_8_2_2],
[0.6_2_4_7, -3.4_2_2_9, -8.9_1_7_9, -1.4_2_9_7, -1_4.1_6_5_0, 1.4_1_4_6, -9.0_2_1_8, -0.2_7_0_3, -0.2_7_0_3],
[6.4_7_8_3, -1.9_9_1_3, -1_0.7_9_2_6, -2.3_3_3_6, 1.5_0_9_2, -0.9_9_7_4, -6.8_2_1_3, 1.3_4_7_7, 1.3_4_7_7],
] )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
_lowercase =tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
_lowercase =tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-4 ) )
@require_tf
@slow
class _a ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowerCAmelCase ( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __lowerCAmelCase ( self ):
_lowercase ="facebook/opt-125m"
_lowercase =[
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_lowercase =[]
_lowercase =GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_lowercase =TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" ).input_ids
_lowercase =model.generate(lowerCAmelCase_ , max_length=10 )
_lowercase =tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self ):
_lowercase ="facebook/opt-350m"
_lowercase =GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_lowercase =TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
_lowercase ="left"
# use different length sentences to test batching
_lowercase =[
"Hello, my dog is a little",
"Today, I",
]
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" , padding=lowerCAmelCase_ )
_lowercase =inputs["input_ids"]
_lowercase =model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs["attention_mask"] )
_lowercase =tokenizer(sentences[0] , return_tensors="tf" ).input_ids
_lowercase =model.generate(input_ids=lowerCAmelCase_ )
_lowercase =inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
_lowercase =tokenizer(sentences[1] , return_tensors="tf" ).input_ids
_lowercase =model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
_lowercase =tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
_lowercase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
_lowercase =tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
_lowercase =[
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
def __lowerCAmelCase ( self ):
_lowercase ="facebook/opt-350m"
_lowercase =[
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_lowercase =[]
_lowercase =GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_lowercase =TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
_lowercase =tokenizer(lowerCAmelCase_ , return_tensors="tf" ).input_ids
_lowercase =model.generate(lowerCAmelCase_ , max_length=10 )
_lowercase =tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 594 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCAmelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = state_dict.pop(__SCREAMING_SNAKE_CASE )
lowercase = val
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowercase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
lowercase = value
else:
lowercase = value
return new_state_dict
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowercase = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase = in_proj_weight[:256, :]
lowercase = in_proj_bias[:256]
lowercase = in_proj_weight[256:512, :]
lowercase = in_proj_bias[256:512]
lowercase = in_proj_weight[-256:, :]
lowercase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
lowercase = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase = in_proj_weight[:256, :]
lowercase = in_proj_bias[:256]
lowercase = in_proj_weight[256:512, :]
lowercase = in_proj_bias[256:512]
lowercase = in_proj_weight[-256:, :]
lowercase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
lowercase = state_dict.pop(
F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
lowercase = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
lowercase = in_proj_weight_cross_attn[:256, :]
lowercase = in_proj_bias_cross_attn[:256]
lowercase = in_proj_weight_cross_attn[256:512, :]
lowercase = in_proj_bias_cross_attn[256:512]
lowercase = in_proj_weight_cross_attn[-256:, :]
lowercase = in_proj_bias_cross_attn[-256:]
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase , lowercase = image.size
lowercase = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase = 800 if 'detection' in checkpoint_url else 1000
lowercase = target_max_size / current_max_size
lowercase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = F.to_tensor(__SCREAMING_SNAKE_CASE )
lowercase = F.normalize(__SCREAMING_SNAKE_CASE , mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] )
return image
@torch.no_grad()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
logger.info('Converting model...' )
# load original state dict
lowercase = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase = rename_backbone_keys(__SCREAMING_SNAKE_CASE )
# query, key and value matrices need special treatment
read_in_q_k_v(__SCREAMING_SNAKE_CASE )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
lowercase = state_dict.pop(__SCREAMING_SNAKE_CASE )
lowercase = val
# create HuggingFace model and load state dict
lowercase = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
lowercase = 15
lowercase = 2
lowercase = {0: 'table', 1: 'table rotated'}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
else:
lowercase = 125
lowercase = 6
lowercase = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
lowercase = DetrImageProcessor(
format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1000 )
lowercase = TableTransformerForObjectDetection(__SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
model.eval()
# verify our conversion
lowercase = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
lowercase = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=__SCREAMING_SNAKE_CASE )
lowercase = Image.open(__SCREAMING_SNAKE_CASE ).convert('RGB' )
lowercase = normalize(resize(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ).unsqueeze(0 )
lowercase = model(__SCREAMING_SNAKE_CASE )
if "detection" in checkpoint_url:
lowercase = (1, 15, 3)
lowercase = torch.tensor(
[[-6.78_97, -16.99_85, 6.79_37], [-8.01_86, -22.21_92, 6.96_77], [-7.31_17, -21.07_08, 7.40_55]] )
lowercase = torch.tensor([[0.48_67, 0.17_67, 0.67_32], [0.67_18, 0.44_79, 0.38_30], [0.47_16, 0.17_60, 0.63_64]] )
else:
lowercase = (1, 125, 7)
lowercase = torch.tensor(
[[-18.14_30, -8.32_14, 4.82_74], [-18.46_85, -7.13_61, -4.26_67], [-26.36_93, -9.34_29, -4.99_62]] )
lowercase = torch.tensor([[0.49_83, 0.55_95, 0.94_40], [0.49_16, 0.63_15, 0.59_54], [0.61_08, 0.86_37, 0.11_35]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
lowercase = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
UpperCAmelCase = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 84 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __UpperCAmelCase (__A ):
'''simple docstring'''
_UpperCamelCase : Dict = 'sew'
def __init__( self , snake_case_=32 , snake_case_=768 , snake_case_=12 , snake_case_=12 , snake_case_=3_072 , snake_case_=2 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , snake_case_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case_=False , snake_case_=128 , snake_case_=16 , snake_case_=True , snake_case_=0.05 , snake_case_=10 , snake_case_=2 , snake_case_=0.0 , snake_case_=10 , snake_case_=0 , snake_case_="mean" , snake_case_=False , snake_case_=False , snake_case_=256 , snake_case_=0 , snake_case_=1 , snake_case_=2 , **snake_case_ , ):
'''simple docstring'''
super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ )
A__ : Dict = hidden_size
A__ : Dict = feat_extract_norm
A__ : str = feat_extract_activation
A__ : Optional[Any] = list(snake_case_ )
A__ : str = list(snake_case_ )
A__ : Any = list(snake_case_ )
A__ : Any = conv_bias
A__ : Any = num_conv_pos_embeddings
A__ : Any = num_conv_pos_embedding_groups
A__ : str = len(self.conv_dim )
A__ : Tuple = num_hidden_layers
A__ : int = intermediate_size
A__ : Union[str, Any] = squeeze_factor
A__ : Union[str, Any] = hidden_act
A__ : List[str] = num_attention_heads
A__ : List[str] = hidden_dropout
A__ : Dict = attention_dropout
A__ : Tuple = activation_dropout
A__ : Optional[int] = feat_proj_dropout
A__ : Optional[Any] = final_dropout
A__ : int = layerdrop
A__ : List[Any] = layer_norm_eps
A__ : int = initializer_range
A__ : Dict = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A__ : Optional[Any] = apply_spec_augment
A__ : int = mask_time_prob
A__ : Tuple = mask_time_length
A__ : Optional[Any] = mask_time_min_masks
A__ : Any = mask_feature_prob
A__ : List[Any] = mask_feature_length
A__ : Any = mask_feature_min_masks
# ctc loss
A__ : str = ctc_loss_reduction
A__ : List[Any] = ctc_zero_infinity
# sequence classification
A__ : Union[str, Any] = use_weighted_layer_sum
A__ : str = classifier_proj_size
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 363 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _lowerCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
__SCREAMING_SNAKE_CASE : Optional[int] = [image]
if isinstance(image[0] , PIL.Image.Image ):
__SCREAMING_SNAKE_CASE : Dict = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
__SCREAMING_SNAKE_CASE : int = np.concatenate(_lowerCamelCase , axis=0 )
__SCREAMING_SNAKE_CASE : Optional[int] = np.array(_lowerCamelCase ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE : int = image.transpose(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : str = 2.0 * image - 1.0
__SCREAMING_SNAKE_CASE : int = torch.from_numpy(_lowerCamelCase )
elif isinstance(image[0] , torch.Tensor ):
__SCREAMING_SNAKE_CASE : List[str] = torch.cat(_lowerCamelCase , dim=0 )
return image
def _lowerCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=0.9995 ):
"""simple docstring"""
if not isinstance(_lowerCamelCase , np.ndarray ):
__SCREAMING_SNAKE_CASE : Any = True
__SCREAMING_SNAKE_CASE : Optional[Any] = va.device
__SCREAMING_SNAKE_CASE : List[str] = va.cpu().numpy()
__SCREAMING_SNAKE_CASE : List[str] = va.cpu().numpy()
__SCREAMING_SNAKE_CASE : Optional[int] = np.sum(va * va / (np.linalg.norm(_lowerCamelCase ) * np.linalg.norm(_lowerCamelCase )) )
if np.abs(_lowerCamelCase ) > DOT_THRESHOLD:
__SCREAMING_SNAKE_CASE : List[str] = (1 - t) * va + t * va
else:
__SCREAMING_SNAKE_CASE : int = np.arccos(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[str] = np.sin(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Any = theta_a * t
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.sin(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = np.sin(theta_a - theta_t ) / sin_theta_a
__SCREAMING_SNAKE_CASE : Union[str, Any] = sin_theta_t / sin_theta_a
__SCREAMING_SNAKE_CASE : str = sa * va + sa * va
if inputs_are_torch:
__SCREAMING_SNAKE_CASE : str = torch.from_numpy(_lowerCamelCase ).to(_lowerCamelCase )
return va
def _lowerCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = F.normalize(_lowerCamelCase , dim=-1 )
__SCREAMING_SNAKE_CASE : Any = F.normalize(_lowerCamelCase , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def _lowerCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Any ):
"""simple docstring"""
for param in model.parameters():
__SCREAMING_SNAKE_CASE : int = value
class _SCREAMING_SNAKE_CASE (UpperCamelCase ):
def __init__( self : Tuple , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Union[str, Any]=None , )->Dict:
super().__init__()
self.register_modules(
vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , clip_model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , coca_model=UpperCamelCase__ , coca_tokenizer=UpperCamelCase__ , coca_transform=UpperCamelCase__ , )
__SCREAMING_SNAKE_CASE : List[Any] = (
feature_extractor.size
if isinstance(feature_extractor.size , UpperCamelCase__ )
else feature_extractor.size["shortest_edge"]
)
__SCREAMING_SNAKE_CASE : Optional[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , UpperCamelCase__ )
set_requires_grad(self.clip_model , UpperCamelCase__ )
def __snake_case ( self : List[str] , UpperCamelCase : List[Any] = "auto" )->List[str]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__SCREAMING_SNAKE_CASE : Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__ )
def __snake_case ( self : Tuple )->str:
self.enable_attention_slicing(UpperCamelCase__ )
def __snake_case ( self : Any )->Union[str, Any]:
set_requires_grad(self.vae , UpperCamelCase__ )
def __snake_case ( self : List[Any] )->List[Any]:
set_requires_grad(self.vae , UpperCamelCase__ )
def __snake_case ( self : Dict )->Optional[int]:
set_requires_grad(self.unet , UpperCamelCase__ )
def __snake_case ( self : List[str] )->Union[str, Any]:
set_requires_grad(self.unet , UpperCamelCase__ )
def __snake_case ( self : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : int )->List[str]:
__SCREAMING_SNAKE_CASE : str = min(int(num_inference_steps * strength ) , UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = max(num_inference_steps - init_timestep , 0 )
__SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __snake_case ( self : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[int]=None )->int:
if not isinstance(UpperCamelCase__ , torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCamelCase__ )}""" )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image.to(device=UpperCamelCase__ , dtype=UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE : Tuple = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase__ )
]
__SCREAMING_SNAKE_CASE : List[str] = torch.cat(UpperCamelCase__ , dim=0 )
else:
__SCREAMING_SNAKE_CASE : str = self.vae.encode(UpperCamelCase__ ).latent_dist.sample(UpperCamelCase__ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__SCREAMING_SNAKE_CASE : str = 0.1_8_2_1_5 * init_latents
__SCREAMING_SNAKE_CASE : List[str] = init_latents.repeat_interleave(UpperCamelCase__ , dim=0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = randn_tensor(init_latents.shape , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ )
# get latents
__SCREAMING_SNAKE_CASE : Dict = self.scheduler.add_noise(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = init_latents
return latents
def __snake_case ( self : int , UpperCamelCase : Dict )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = self.coca_transform(UpperCamelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__SCREAMING_SNAKE_CASE : int = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__SCREAMING_SNAKE_CASE : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def __snake_case ( self : int , UpperCamelCase : str , UpperCamelCase : List[Any] )->Dict:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extractor.preprocess(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : str = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
__SCREAMING_SNAKE_CASE : Optional[int] = self.clip_model.get_image_features(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = image_embeddings_clip.repeat_interleave(UpperCamelCase__ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __snake_case ( self : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , )->List[str]:
__SCREAMING_SNAKE_CASE : Optional[Any] = latents.detach().requires_grad_()
__SCREAMING_SNAKE_CASE : Tuple = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
# predict the noise residual
__SCREAMING_SNAKE_CASE : Tuple = self.unet(UpperCamelCase__ , UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__SCREAMING_SNAKE_CASE : int = self.scheduler.alphas_cumprod[timestep]
__SCREAMING_SNAKE_CASE : str = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__SCREAMING_SNAKE_CASE : Tuple = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__SCREAMING_SNAKE_CASE : Optional[int] = torch.sqrt(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE : Dict = self.scheduler.sigmas[index]
__SCREAMING_SNAKE_CASE : Union[str, Any] = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__SCREAMING_SNAKE_CASE : List[Any] = 1 / 0.1_8_2_1_5 * sample
__SCREAMING_SNAKE_CASE : int = self.vae.decode(UpperCamelCase__ ).sample
__SCREAMING_SNAKE_CASE : Any = (image / 2 + 0.5).clamp(0 , 1 )
__SCREAMING_SNAKE_CASE : Any = transforms.Resize(self.feature_extractor_size )(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = self.normalize(UpperCamelCase__ ).to(latents.dtype )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.clip_model.get_image_features(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = spherical_dist_loss(UpperCamelCase__ , UpperCamelCase__ ).mean() * clip_guidance_scale
__SCREAMING_SNAKE_CASE : List[Any] = -torch.autograd.grad(UpperCamelCase__ , UpperCamelCase__ )[0]
if isinstance(self.scheduler , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = latents.detach() + grads * (sigma**2)
__SCREAMING_SNAKE_CASE : List[str] = noise_pred_original
else:
__SCREAMING_SNAKE_CASE : List[str] = noise_pred_original - torch.sqrt(UpperCamelCase__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : int = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : Dict = 5_1_2 , UpperCamelCase : Optional[int] = 5_1_2 , UpperCamelCase : Any = 0.6 , UpperCamelCase : str = 5_0 , UpperCamelCase : str = 7.5 , UpperCamelCase : Dict = 1 , UpperCamelCase : List[Any] = 0.0 , UpperCamelCase : Any = 1_0_0 , UpperCamelCase : int = None , UpperCamelCase : Any = "pil" , UpperCamelCase : Dict = True , UpperCamelCase : Optional[int] = 0.8 , UpperCamelCase : Optional[Any] = 0.1 , UpperCamelCase : Tuple = 0.1 , )->Union[str, Any]:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCamelCase__ )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(UpperCamelCase__ , torch.Generator ) and batch_size > 1:
__SCREAMING_SNAKE_CASE : str = [generator] + [None] * (batch_size - 1)
__SCREAMING_SNAKE_CASE : str = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__SCREAMING_SNAKE_CASE : Any = [x[0] for x in coca_is_none if x[1]]
__SCREAMING_SNAKE_CASE : int = ", ".join(UpperCamelCase__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCamelCase__ ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__SCREAMING_SNAKE_CASE : int = self.get_image_description(UpperCamelCase__ )
if style_prompt is None:
if len(UpperCamelCase__ ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
__SCREAMING_SNAKE_CASE : Any = self.get_image_description(UpperCamelCase__ )
# get prompt text embeddings for content and style
__SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(
UpperCamelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="pt" , )
__SCREAMING_SNAKE_CASE : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__SCREAMING_SNAKE_CASE : Any = self.tokenizer(
UpperCamelCase__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors="pt" , )
__SCREAMING_SNAKE_CASE : int = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__SCREAMING_SNAKE_CASE : str = slerp(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# duplicate text embeddings for each generation per prompt
__SCREAMING_SNAKE_CASE : Optional[int] = text_embeddings.repeat_interleave(UpperCamelCase__ , dim=0 )
# set timesteps
__SCREAMING_SNAKE_CASE : Dict = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__SCREAMING_SNAKE_CASE : int = {}
if accepts_offset:
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
self.scheduler.set_timesteps(UpperCamelCase__ , **UpperCamelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self.get_timesteps(UpperCamelCase__ , UpperCamelCase__ , self.device )
__SCREAMING_SNAKE_CASE : Dict = timesteps[:1].repeat(UpperCamelCase__ )
# Preprocess image
__SCREAMING_SNAKE_CASE : Dict = preprocess(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = self.prepare_latents(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , text_embeddings.dtype , self.device , UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : int = preprocess(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_latents(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , text_embeddings.dtype , self.device , UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = slerp(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if clip_guidance_scale > 0:
__SCREAMING_SNAKE_CASE : List[str] = self.get_clip_image_embeddings(UpperCamelCase__ , UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_clip_image_embeddings(UpperCamelCase__ , UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : str = slerp(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__SCREAMING_SNAKE_CASE : Tuple = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE : Any = content_text_input.input_ids.shape[-1]
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer([""] , padding="max_length" , max_length=UpperCamelCase__ , return_tensors="pt" )
__SCREAMING_SNAKE_CASE : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__SCREAMING_SNAKE_CASE : Optional[Any] = uncond_embeddings.repeat_interleave(UpperCamelCase__ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__SCREAMING_SNAKE_CASE : Dict = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__SCREAMING_SNAKE_CASE : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device="cpu" , dtype=UpperCamelCase__ ).to(
self.device )
else:
__SCREAMING_SNAKE_CASE : int = torch.randn(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__SCREAMING_SNAKE_CASE : Tuple = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__SCREAMING_SNAKE_CASE : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__SCREAMING_SNAKE_CASE : Optional[int] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__SCREAMING_SNAKE_CASE : int = {}
if accepts_eta:
__SCREAMING_SNAKE_CASE : Dict = eta
# check if the scheduler accepts generator
__SCREAMING_SNAKE_CASE : Tuple = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__SCREAMING_SNAKE_CASE : Union[str, Any] = generator
with self.progress_bar(total=UpperCamelCase__ ):
for i, t in enumerate(UpperCamelCase__ ):
# expand the latents if we are doing classifier free guidance
__SCREAMING_SNAKE_CASE : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__SCREAMING_SNAKE_CASE : Dict = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
# predict the noise residual
__SCREAMING_SNAKE_CASE : Dict = self.unet(UpperCamelCase__ , UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = noise_pred.chunk(2 )
__SCREAMING_SNAKE_CASE : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__SCREAMING_SNAKE_CASE : Tuple = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = self.cond_fn(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
# compute the previous noisy sample x_t -> x_t-1
__SCREAMING_SNAKE_CASE : int = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__SCREAMING_SNAKE_CASE : Tuple = 1 / 0.1_8_2_1_5 * latents
__SCREAMING_SNAKE_CASE : Dict = self.vae.decode(UpperCamelCase__ ).sample
__SCREAMING_SNAKE_CASE : str = (image / 2 + 0.5).clamp(0 , 1 )
__SCREAMING_SNAKE_CASE : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCamelCase__ , nsfw_content_detected=UpperCamelCase__ )
| 700 |
def _lowerCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : Dict , __lowerCamelCase : int ):
"""simple docstring"""
if height >= 1:
move_tower(height - 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
move_disk(__lowerCamelCase , __lowerCamelCase )
move_tower(height - 1 , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
print("moving disk from" , __lowerCamelCase , "to" , __lowerCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = int(input("Height of hanoi: " ).strip() )
move_tower(__lowerCamelCase , "A" , "B" , "C" )
if __name__ == "__main__":
main()
| 447 | 0 |
'''simple docstring'''
def __snake_case ( _UpperCAmelCase : int = 100):
UpperCamelCase = 0
UpperCamelCase = 0
for i in range(1, n + 1):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 212 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
snake_case_ : Optional[Any] = 'pytorch_model.bin'
snake_case_ : Union[str, Any] = 'pytorch_model.bin.index.json'
snake_case_ : Optional[Any] = 'adapter_config.json'
snake_case_ : List[str] = 'adapter_model.bin'
snake_case_ : Any = 'adapter_model.safetensors'
snake_case_ : Optional[int] = 'tf_model.h5'
snake_case_ : List[Any] = 'tf_model.h5.index.json'
snake_case_ : Any = 'model.ckpt'
snake_case_ : Optional[Any] = 'flax_model.msgpack'
snake_case_ : List[str] = 'flax_model.msgpack.index.json'
snake_case_ : List[str] = 'model.safetensors'
snake_case_ : Any = 'model.safetensors.index.json'
snake_case_ : Any = 'config.json'
snake_case_ : Optional[Any] = 'preprocessor_config.json'
snake_case_ : List[Any] = FEATURE_EXTRACTOR_NAME
snake_case_ : Optional[int] = 'generation_config.json'
snake_case_ : Any = 'modelcard.json'
snake_case_ : Optional[int] = '▁'
snake_case_ : Optional[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
snake_case_ : Union[str, Any] = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
snake_case_ : Union[str, Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
snake_case_ : Union[str, Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def __snake_case ( _UpperCAmelCase : int):
if version.parse(_UpperCAmelCase) < version.parse(_UpperCAmelCase):
if "dev" in min_version:
UpperCamelCase = (
'''This example requires a source install from HuggingFace Transformers (see '''
'''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
)
else:
UpperCamelCase = f'This example requires a minimum version of {min_version},'
error_message += f' but the version found is {__version__}.\n'
raise ImportError(
error_message
+ '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
'''versions of HuggingFace Transformers.''')
| 212 | 1 |
lowerCamelCase : str = '''
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCamelCase : Tuple = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCamelCase : Dict = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 290 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Tuple = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase : Optional[int] = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase : List[str] = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase : Any = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
lowerCamelCase : str = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
lowerCamelCase : Any = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
lowerCamelCase : Tuple = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
lowerCamelCase : Tuple = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase : int = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
lowerCamelCase : List[Any] = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class _UpperCamelCase (a_ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRContextEncoderTokenizer
class _UpperCamelCase (a_ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRQuestionEncoderTokenizer
lowerCamelCase : Union[str, Any] = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
lowerCamelCase : List[str] = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
lowerCamelCase : List[Any] = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(a_ )
class _UpperCamelCase :
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
__lowerCAmelCase = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
__lowerCAmelCase = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
__lowerCAmelCase = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
__lowerCAmelCase = len(__UpperCamelCase )
__lowerCAmelCase = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
assert len(__UpperCamelCase ) == len(
__UpperCamelCase ), F"""There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts."""
__lowerCAmelCase = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["input_ids"]
__lowerCAmelCase = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["input_ids"]
__lowerCAmelCase = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
__lowerCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__lowerCAmelCase = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1_6 , __UpperCamelCase = 6_4 , __UpperCamelCase = 4 , )-> List[DPRSpanPrediction]:
__lowerCAmelCase = reader_input["input_ids"]
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = reader_output[:3]
__lowerCAmelCase = len(__UpperCamelCase )
__lowerCAmelCase = sorted(range(__UpperCamelCase ) , reverse=__UpperCamelCase , key=relevance_logits.__getitem__ )
__lowerCAmelCase = []
for doc_id in sorted_docs:
__lowerCAmelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__lowerCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__lowerCAmelCase = sequence_ids.index(self.pad_token_id )
else:
__lowerCAmelCase = len(__UpperCamelCase )
__lowerCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCamelCase , top_spans=__UpperCamelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCamelCase , start_index=__UpperCamelCase , end_index=__UpperCamelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(__UpperCamelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )-> List[DPRSpanPrediction]:
__lowerCAmelCase = []
for start_index, start_score in enumerate(__UpperCamelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__lowerCAmelCase = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] , reverse=__UpperCamelCase )
__lowerCAmelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
__lowerCAmelCase = end_index - start_index + 1
assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(__UpperCamelCase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a_ )
class _UpperCamelCase (a_ , a_ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = READER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = READER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = ["""input_ids""", """attention_mask"""]
snake_case_ = DPRReaderTokenizer
| 290 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_snake_case = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 500 |
from __future__ import annotations
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = text, pattern
_lowerCAmelCase , _lowerCAmelCase : int = len(__a), len(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
for i in range(self.patLen - 1, -1, -1):
if char == self.pattern[i]:
return i
return -1
def snake_case__ ( self, __a):
'''simple docstring'''
for i in range(self.patLen - 1, -1, -1):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = []
for i in range(self.textLen - self.patLen + 1):
_lowerCAmelCase : Dict = self.mismatch_in_text(__a)
if mismatch_index == -1:
positions.append(__a)
else:
_lowerCAmelCase : List[str] = self.match_in_pattern(self.text[mismatch_index])
_lowerCAmelCase : Dict = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_snake_case = "ABAABA"
_snake_case = "AB"
_snake_case = BoyerMooreSearch(text, pattern)
_snake_case = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 500 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class lowerCAmelCase ( _UpperCAmelCase ):
UpperCAmelCase__ = """ctrl"""
UpperCAmelCase__ = ["""past_key_values"""]
UpperCAmelCase__ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , UpperCAmelCase : int=246534 , UpperCAmelCase : str=256 , UpperCAmelCase : Dict=1280 , UpperCAmelCase : List[str]=8192 , UpperCAmelCase : Tuple=48 , UpperCAmelCase : Dict=16 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : List[str]=1e-6 , UpperCAmelCase : Dict=0.0_2 , UpperCAmelCase : List[str]=True , **UpperCAmelCase : List[str] , ) -> Union[str, Any]:
lowerCamelCase__ : Any = vocab_size
lowerCamelCase__ : Any = n_positions
lowerCamelCase__ : Any = n_embd
lowerCamelCase__ : Any = n_layer
lowerCamelCase__ : Optional[int] = n_head
lowerCamelCase__ : List[Any] = dff
lowerCamelCase__ : List[str] = resid_pdrop
lowerCamelCase__ : List[Any] = embd_pdrop
lowerCamelCase__ : str = layer_norm_epsilon
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : Optional[Any] = use_cache
super().__init__(**lowercase__ )
| 702 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
@flax_register_to_config
class lowerCAmelCase ( nn.Module, __UpperCamelCase, __UpperCamelCase ):
UpperCAmelCase__ = 32
UpperCAmelCase__ = 4
UpperCAmelCase__ = 4
UpperCAmelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCAmelCase__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
UpperCAmelCase__ = False
UpperCAmelCase__ = (3_20, 6_40, 12_80, 12_80)
UpperCAmelCase__ = 2
UpperCAmelCase__ = 8
UpperCAmelCase__ = None
UpperCAmelCase__ = 12_80
UpperCAmelCase__ = 0.0
UpperCAmelCase__ = False
UpperCAmelCase__ = jnp.floataa
UpperCAmelCase__ = True
UpperCAmelCase__ = 0
UpperCAmelCase__ = False
def A_ ( self : Tuple , UpperCAmelCase : jax.random.KeyArray ) -> FrozenDict:
# init input tensors
lowerCamelCase__ : int = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase__ : List[str] = jnp.zeros(UpperCAmelCase , dtype=jnp.floataa )
lowerCamelCase__ : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
lowerCamelCase__ : Dict = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = jax.random.split(UpperCAmelCase )
lowerCamelCase__ : Dict = {'params': params_rng, 'dropout': dropout_rng}
return self.init(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )["params"]
def A_ ( self : Tuple ) -> Optional[int]:
lowerCamelCase__ : Any = self.block_out_channels
lowerCamelCase__ : int = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase__ : Tuple = self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase__ : Optional[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCamelCase__ : Optional[int] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCamelCase__ : int = FlaxTimestepEmbedding(UpperCAmelCase , dtype=self.dtype )
lowerCamelCase__ : Optional[int] = self.only_cross_attention
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : List[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase__ : Dict = output_channel
lowerCamelCase__ : Optional[int] = block_out_channels[i]
lowerCamelCase__ : List[Any] = i == len(UpperCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase__ : Tuple = FlaxCrossAttnDownBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCamelCase__ : str = FlaxDownBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(UpperCAmelCase )
lowerCamelCase__ : List[Any] = down_blocks
# mid
lowerCamelCase__ : Dict = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
lowerCamelCase__ : Any = []
lowerCamelCase__ : Optional[int] = list(reversed(UpperCAmelCase ) )
lowerCamelCase__ : Any = list(reversed(UpperCAmelCase ) )
lowerCamelCase__ : int = list(reversed(UpperCAmelCase ) )
lowerCamelCase__ : Tuple = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
lowerCamelCase__ : str = output_channel
lowerCamelCase__ : int = reversed_block_out_channels[i]
lowerCamelCase__ : int = reversed_block_out_channels[min(i + 1 , len(UpperCAmelCase ) - 1 )]
lowerCamelCase__ : Optional[Any] = i == len(UpperCAmelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
lowerCamelCase__ : Tuple = FlaxCrossAttnUpBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
lowerCamelCase__ : Optional[Any] = FlaxUpBlockaD(
in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , prev_output_channel=UpperCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(UpperCAmelCase )
lowerCamelCase__ : Tuple = output_channel
lowerCamelCase__ : Tuple = up_blocks
# out
lowerCamelCase__ : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
lowerCamelCase__ : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(UpperCAmelCase , jnp.ndarray ):
lowerCamelCase__ : List[str] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(UpperCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase__ : List[Any] = timesteps.astype(dtype=jnp.floataa )
lowerCamelCase__ : Any = jnp.expand_dims(UpperCAmelCase , 0 )
lowerCamelCase__ : List[str] = self.time_proj(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.time_embedding(UpperCAmelCase )
# 2. pre-process
lowerCamelCase__ : Dict = jnp.transpose(UpperCAmelCase , (0, 2, 3, 1) )
lowerCamelCase__ : Optional[Any] = self.conv_in(UpperCAmelCase )
# 3. down
lowerCamelCase__ : Any = (sample,)
for down_block in self.down_blocks:
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = down_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train )
else:
lowerCamelCase__ , lowerCamelCase__ : Any = down_block(UpperCAmelCase , UpperCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
lowerCamelCase__ : Union[str, Any] = ()
for down_block_res_sample, down_block_additional_residual in zip(
UpperCAmelCase , UpperCAmelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase__ : str = new_down_block_res_samples
# 4. mid
lowerCamelCase__ : List[Any] = self.mid_block(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
lowerCamelCase__ : str = down_block_res_samples[-(self.layers_per_block + 1) :]
lowerCamelCase__ : List[str] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCamelCase__ : List[Any] = up_block(
UpperCAmelCase , temb=UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , deterministic=not train , )
else:
lowerCamelCase__ : int = up_block(UpperCAmelCase , temb=UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , deterministic=not train )
# 6. post-process
lowerCamelCase__ : str = self.conv_norm_out(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = nn.silu(UpperCAmelCase )
lowerCamelCase__ : Any = self.conv_out(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = jnp.transpose(UpperCAmelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=UpperCAmelCase )
| 188 | 0 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _snake_case (nn.Module):
def __init__( self ,_snake_case = 16 ,_snake_case = 88 ,_snake_case = None ,_snake_case = 1 ,_snake_case = 0.0 ,_snake_case = 32 ,_snake_case = None ,_snake_case = False ,_snake_case = None ,_snake_case = None ,_snake_case = "geglu" ,_snake_case = None ,):
super().__init__()
UpperCAmelCase_ : Optional[Any] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_snake_case ,attention_head_dim=_snake_case ,in_channels=_snake_case ,num_layers=_snake_case ,dropout=_snake_case ,norm_num_groups=_snake_case ,cross_attention_dim=_snake_case ,attention_bias=_snake_case ,sample_size=_snake_case ,num_vector_embeds=_snake_case ,activation_fn=_snake_case ,num_embeds_ada_norm=_snake_case ,)
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
UpperCAmelCase_ : List[str] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
UpperCAmelCase_ : int = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
UpperCAmelCase_ : List[Any] = [1, 0]
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case = True ,):
UpperCAmelCase_ : List[str] = hidden_states
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Optional[int] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
UpperCAmelCase_ : Any = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
UpperCAmelCase_ : Any = self.transformer_index_for_condition[i]
UpperCAmelCase_ : int = self.transformers[transformer_index](
_snake_case ,encoder_hidden_states=_snake_case ,timestep=_snake_case ,cross_attention_kwargs=_snake_case ,return_dict=_snake_case ,)[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
UpperCAmelCase_ : Dict = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
UpperCAmelCase_ : List[Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_snake_case )
| 71 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCAmelCase_ = logging.getLogger(__name__)
lowerCAmelCase_ = '''pytorch_model.bin'''
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
SCREAMING_SNAKE_CASE : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "A csv or a json file containing the validation data."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default=__A , metadata={"help": "The name of the task to train on."} , )
SCREAMING_SNAKE_CASE : Optional[List[str]] = dataclasses.field(
default=__A , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
SCREAMING_SNAKE_CASE : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
SCREAMING_SNAKE_CASE : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
SCREAMING_SNAKE_CASE : Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
SCREAMING_SNAKE_CASE : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
SCREAMING_SNAKE_CASE : Optional[int] = dataclasses.field(
default=__A , metadata={"help": "Random seed for initialization."} , )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
snake_case_ = dataset.filter(lambda SCREAMING_SNAKE_CASE__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
snake_case_ = int(eval_result * len(SCREAMING_SNAKE_CASE__ ) )
print(SCREAMING_SNAKE_CASE__ )
snake_case_ = dataset.sort('''probability''' , reverse=SCREAMING_SNAKE_CASE__ )
snake_case_ = dataset.select(range(SCREAMING_SNAKE_CASE__ ) )
snake_case_ = dataset.remove_columns(['''label''', '''probability'''] )
snake_case_ = dataset.rename_column('''prediction''' , '''label''' )
snake_case_ = dataset.map(lambda SCREAMING_SNAKE_CASE__ : {"label": idalabel[example["label"]]} )
snake_case_ = dataset.shuffle(seed=args.seed )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ )
else:
dataset.to_json(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
snake_case_ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
snake_case_ = STModelArguments(model_name_or_path=SCREAMING_SNAKE_CASE__ )
snake_case_ = STDataArguments(train_file=SCREAMING_SNAKE_CASE__ , infer_file=SCREAMING_SNAKE_CASE__ )
snake_case_ = STTrainingArguments(output_dir=SCREAMING_SNAKE_CASE__ )
snake_case_ = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(SCREAMING_SNAKE_CASE__ ).items():
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for key, value in kwargs.items():
if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Sanity checks
snake_case_ = {}
snake_case_ = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case_ = args.train_file
snake_case_ = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case_ = args.eval_file
for key in data_files:
snake_case_ = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
snake_case_ = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
snake_case_ = F'''{args.output_dir}/self-train_iter-{{}}'''.format
snake_case_ = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = None
snake_case_ = None
snake_case_ = 0
snake_case_ = False
# Show the progress bar
snake_case_ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case_ = data_dir_format(SCREAMING_SNAKE_CASE__ )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-1''' )
snake_case_ = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
arguments_dict.update({key: value} )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , SCREAMING_SNAKE_CASE__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''stage-2''' )
# Update arguments_dict
snake_case_ = model_path
snake_case_ = data_files['''train''']
snake_case_ = current_output_dir
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' , SCREAMING_SNAKE_CASE__ )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , SCREAMING_SNAKE_CASE__ )
finetune(**SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , SCREAMING_SNAKE_CASE__ )
snake_case_ = iteration
snake_case_ = data_dir_format(iteration + 1 )
snake_case_ = AutoConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE__ , '''best-checkpoint''' ) )
snake_case_ = config.idalabel
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-checkpoint.json''' )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f:
snake_case_ = float(json.load(SCREAMING_SNAKE_CASE__ )[args.eval_metric] )
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(SCREAMING_SNAKE_CASE__ )
# Loading the dataset from local csv or json files.
snake_case_ = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
snake_case_ = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
shutil.copy(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case_ = eval_result
if best_iteration is None:
snake_case_ = new_iteration
snake_case_ = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case_ = new_iteration
snake_case_ = new_eval_result
snake_case_ = 0
else:
if new_eval_result == best_eval_result:
snake_case_ = new_iteration
snake_case_ = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case_ = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , SCREAMING_SNAKE_CASE__ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , SCREAMING_SNAKE_CASE__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(SCREAMING_SNAKE_CASE__ , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''eval_results_best-iteration.json''' ) , ) | 39 | 0 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Tuple = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class lowerCAmelCase_ ( lowercase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = BartphoTokenizer
SCREAMING_SNAKE_CASE_ : int = False
SCREAMING_SNAKE_CASE_ : Tuple = True
def a_ ( self : Optional[int] ) -> int:
'''simple docstring'''
super().setUp()
_UpperCAmelCase : str = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
_UpperCAmelCase : Union[str, Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
_UpperCAmelCase : str = {'''unk_token''': '''<unk>'''}
_UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(F'''{token} {vocab_tokens[token]}\n''' )
_UpperCAmelCase : List[Any] = BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def a_ ( self : str , **UpperCAmelCase_ : Any ) -> Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def a_ ( self : Any , UpperCAmelCase_ : Union[str, Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = '''This is a là test'''
_UpperCAmelCase : Union[str, Any] = '''This is a<unk><unk> test'''
return input_text, output_text
def a_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = BartphoTokenizer(UpperCAmelCase_ , self.monolingual_vocab_file , **self.special_tokens_map )
_UpperCAmelCase : List[str] = '''This is a là test'''
_UpperCAmelCase : Any = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
_UpperCAmelCase : List[str] = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
_UpperCAmelCase : str = tokens + [tokenizer.unk_token]
_UpperCAmelCase : str = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , UpperCAmelCase_ )
| 416 |
def _A ( _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase : Tuple = len(_UpperCamelCase )
_UpperCAmelCase : Tuple = len(_UpperCamelCase )
_UpperCAmelCase : Dict = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_UpperCAmelCase : List[Any] = True
for i in range(_UpperCamelCase ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_UpperCAmelCase : List[Any] = True
if a[i].islower():
_UpperCAmelCase : str = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 416 | 1 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class UpperCAmelCase_ ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = logging.get_logger()
# the current default level is logging.WARNING
A_ = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() ,logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(__snake_case )
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = logging.get_verbosity()
A_ = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
A_ = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(__snake_case ) as cl:
logger.warning(__snake_case )
self.assertEqual(cl.out ,msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(__snake_case ) as cl:
logger.warning(__snake_case )
self.assertEqual(cl.out ,'''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(__snake_case ) as cl:
logger.warning(__snake_case )
self.assertEqual(cl.out ,msg + '''\n''' )
# restore to the original level
logging.set_verbosity(__snake_case )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def __UpperCAmelCase ( self ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
A_ = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
A_ = os.getenv('''TRANSFORMERS_VERBOSITY''' ,__snake_case )
A_ = logging.log_levels[env_level_str]
A_ = logging.get_verbosity()
self.assertEqual(
__snake_case ,__snake_case ,f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' ,)
# restore to the original level
A_ = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def __UpperCAmelCase ( self ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
A_ = logging.logging.getLogger()
with CaptureLogger(__snake_case ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' ,cl.out )
# no need to restore as nothing was changed
def __UpperCAmelCase ( self ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
A_ = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
A_ = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(__snake_case ) as cl:
logger.warning_advice(__snake_case )
self.assertEqual(cl.out ,'''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(__snake_case ) as cl:
logger.warning_advice(__snake_case )
self.assertEqual(cl.out ,msg + '''\n''' )
def UpperCAmelCase_ ( ) -> Optional[int]:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 188 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a__ : Dict = logging.get_logger(__name__)
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = ["""pixel_values"""]
def __init__( self ,__snake_case = True ,__snake_case = None ,__snake_case = None ,__snake_case = PILImageResampling.BILINEAR ,__snake_case = True ,__snake_case = 1 / 2_5_5 ,__snake_case = True ,__snake_case = None ,__snake_case = None ,**__snake_case ,):
"""simple docstring"""
super().__init__(**__snake_case )
A_ = size if size is not None else {'''shortest_edge''': 3_8_4}
A_ = get_size_dict(__snake_case ,default_to_square=__snake_case )
A_ = do_resize
A_ = size
# Default value set here for backwards compatibility where the value in config is None
A_ = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6
A_ = resample
A_ = do_rescale
A_ = rescale_factor
A_ = do_normalize
A_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __UpperCAmelCase ( self ,__snake_case ,__snake_case ,__snake_case ,__snake_case = PILImageResampling.BICUBIC ,__snake_case = None ,**__snake_case ,):
"""simple docstring"""
A_ = get_size_dict(__snake_case ,default_to_square=__snake_case )
if "shortest_edge" not in size:
raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
A_ = size['''shortest_edge''']
if shortest_edge < 3_8_4:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A_ = int(shortest_edge / crop_pct )
A_ = get_resize_output_image_size(__snake_case ,size=__snake_case ,default_to_square=__snake_case )
A_ = resize(image=__snake_case ,size=__snake_case ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__snake_case ,size=(shortest_edge, shortest_edge) ,data_format=__snake_case ,**__snake_case )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__snake_case ,size=(shortest_edge, shortest_edge) ,resample=__snake_case ,data_format=__snake_case ,**__snake_case )
def __UpperCAmelCase ( self ,__snake_case ,__snake_case ,__snake_case = None ,**__snake_case ,):
"""simple docstring"""
return rescale(__snake_case ,scale=__snake_case ,data_format=__snake_case ,**__snake_case )
def __UpperCAmelCase ( self ,__snake_case ,__snake_case ,__snake_case ,__snake_case = None ,**__snake_case ,):
"""simple docstring"""
return normalize(__snake_case ,mean=__snake_case ,std=__snake_case ,data_format=__snake_case ,**__snake_case )
def __UpperCAmelCase ( self ,__snake_case ,__snake_case = None ,__snake_case = None ,__snake_case = None ,__snake_case = None ,__snake_case = None ,__snake_case = None ,__snake_case = None ,__snake_case = None ,__snake_case = None ,__snake_case = None ,__snake_case = ChannelDimension.FIRST ,**__snake_case ,):
"""simple docstring"""
A_ = do_resize if do_resize is not None else self.do_resize
A_ = crop_pct if crop_pct is not None else self.crop_pct
A_ = resample if resample is not None else self.resample
A_ = do_rescale if do_rescale is not None else self.do_rescale
A_ = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ = do_normalize if do_normalize is not None else self.do_normalize
A_ = image_mean if image_mean is not None else self.image_mean
A_ = image_std if image_std is not None else self.image_std
A_ = size if size is not None else self.size
A_ = get_size_dict(__snake_case ,default_to_square=__snake_case )
A_ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
A_ = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
A_ = [self.resize(image=__snake_case ,size=__snake_case ,crop_pct=__snake_case ,resample=__snake_case ) for image in images]
if do_rescale:
A_ = [self.rescale(image=__snake_case ,scale=__snake_case ) for image in images]
if do_normalize:
A_ = [self.normalize(image=__snake_case ,mean=__snake_case ,std=__snake_case ) for image in images]
A_ = [to_channel_dimension_format(__snake_case ,__snake_case ) for image in images]
A_ = {'''pixel_values''': images}
return BatchFeature(data=__snake_case ,tensor_type=__snake_case )
| 188 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_snake_case = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 703 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
_snake_case = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
_snake_case = {
'''facebook/nllb-large-en-ro''': 1_0_2_4,
'''facebook/nllb-200-distilled-600M''': 1_0_2_4,
}
# fmt: off
_snake_case = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_: Optional[Any] = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_: Optional[Any] = NllbTokenizer
SCREAMING_SNAKE_CASE_: List[int] = []
SCREAMING_SNAKE_CASE_: List[int] = []
def __init__( self : List[str] , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : Dict="</s>" , UpperCAmelCase_ : Dict="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : Tuple="<pad>" , UpperCAmelCase_ : Union[str, Any]="<mask>" , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Tuple=False , **UpperCAmelCase_ : Any , ) -> Union[str, Any]:
"""simple docstring"""
_lowerCAmelCase = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token
_lowerCAmelCase = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , src_lang=UpperCAmelCase_ , tgt_lang=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , legacy_behaviour=UpperCAmelCase_ , **UpperCAmelCase_ , )
_lowerCAmelCase = vocab_file
_lowerCAmelCase = False if not self.vocab_file else True
_lowerCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
_lowerCAmelCase = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase = src_lang if src_lang is not None else 'eng_Latn'
_lowerCAmelCase = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowerCamelCase ( self : Any ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __lowerCamelCase ( self : List[Any] , UpperCAmelCase_ : str ) -> None:
"""simple docstring"""
_lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowerCamelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCamelCase ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] , UpperCAmelCase_ : Optional[str] , **UpperCAmelCase_ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase = src_lang
_lowerCAmelCase = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
_lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase_ )
_lowerCAmelCase = tgt_lang_id
return inputs
def __lowerCamelCase ( self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str = "eng_Latn" , UpperCAmelCase_ : Optional[List[str]] = None , UpperCAmelCase_ : str = "fra_Latn" , **UpperCAmelCase_ : List[str] , ) -> BatchEncoding:
"""simple docstring"""
_lowerCAmelCase = src_lang
_lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def __lowerCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCamelCase ( self : int , UpperCAmelCase_ : Optional[Any] ) -> None:
"""simple docstring"""
_lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase = [self.cur_lang_code]
_lowerCAmelCase = [self.eos_token_id]
_lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCamelCase ( self : Optional[int] , UpperCAmelCase_ : str ) -> None:
"""simple docstring"""
_lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase_ )
if self.legacy_behaviour:
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase = [self.cur_lang_code]
_lowerCAmelCase = [self.eos_token_id]
_lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __lowerCamelCase ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
_lowerCAmelCase = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file , UpperCAmelCase_ )
return (out_vocab_file,)
| 491 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = os.path.join(args.tf_model_dir , '''parameters.json''' )
snake_case_ = json.loads(open(SCREAMING_SNAKE_CASE__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('''.pt''' ):
snake_case_ = args.output + '''.pt'''
snake_case_ = OrderedDict()
with tf.device('''/CPU:0''' ):
snake_case_ = tf.train.load_checkpoint(args.tf_model_dir )
snake_case_ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case_ = reader.get_tensor(SCREAMING_SNAKE_CASE__ ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
snake_case_ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
snake_case_ = 8
snake_case_ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/moe''' ):
snake_case_ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/softmlp/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
snake_case_ = key_name[-9:-7]
for i in range(16 ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
snake_case_ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/mlp''' ):
snake_case_ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/p1/bias''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/p2/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/p2/bias''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/ln''' ):
snake_case_ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
snake_case_ = '''model.blocks.%d.feed_forward.norm.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/g''' ):
snake_case_ = '''model.blocks.%d.feed_forward.norm.weight''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/att''' ):
snake_case_ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
snake_case_ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ = state[:, 0, :, :]
snake_case_ = state[:, 1, :, :]
snake_case_ = state[:, 2, :, :]
snake_case_ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
snake_case_ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
snake_case_ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/o/kernel''' ):
snake_case_ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
snake_case_ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/an''' ):
snake_case_ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
snake_case_ = '''model.blocks.%d.self_attn.norm.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/g''' ):
snake_case_ = '''model.blocks.%d.self_attn.norm.weight''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
snake_case_ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
snake_case_ = '''model.%s.weight''' % nlayer
snake_case_ = vnp.copy() # same in embedded
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
if key_name.startswith('''model/wte''' ):
snake_case_ = '''lm_head.weight'''
snake_case_ = vnp.copy() # same in embedded
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/wob''' ):
snake_case_ = '''final_logits_bias'''
snake_case_ = vnp.copy() # same in embedded
snake_case_ = state.reshape((1, -1) )
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name == "model/dense/kernel":
snake_case_ = '''model.last_project.weight'''
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name == "model/dense_1/bias":
snake_case_ = '''model.last_project.bias'''
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
torch.save(SCREAMING_SNAKE_CASE__ , args.output )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowerCAmelCase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 39 |
__magic_name__ : str = 8.314_4598
def lowercase__ ( _UpperCamelCase , _UpperCamelCase) -> float:
"""simple docstring"""
if temperature < 0:
raise Exception('Temperature cannot be less than 0 K')
if molar_mass <= 0:
raise Exception('Molar mass cannot be less than or equal to 0 kg/mol')
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
__magic_name__ : List[Any] = 300
__magic_name__ : Union[str, Any] = 28
__magic_name__ : str = rms_speed_of_molecule(temperature, molar_mass)
print(F'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
| 280 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
__snake_case = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def a ( __a = "mumbai" ) -> Generator[tuple[str, str], None, None]:
'''simple docstring'''
UpperCamelCase__ :int = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
UpperCamelCase__ :List[Any] = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
UpperCamelCase__ :List[str] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""") | 716 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__snake_case = logging.getLogger()
def a ( __a ) -> Dict:
'''simple docstring'''
UpperCamelCase__ :str = {}
UpperCamelCase__ :Dict = os.path.join(__a , '''all_results.json''' )
if os.path.exists(__a ):
with open(__a , '''r''' ) as f:
UpperCamelCase__ :int = json.load(__a )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
__snake_case = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class lowercase ( A__ ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import xla_spawn
UpperCamelCase__ :Any = self.get_auto_remove_tmp_dir()
UpperCamelCase__ :List[str] = F'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(UpperCamelCase_ , '''argv''' , UpperCamelCase_ ):
UpperCamelCase__ :List[Any] = time()
xla_spawn.main()
UpperCamelCase__ :List[Any] = time()
UpperCamelCase__ :Tuple = get_results(UpperCamelCase_ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
import xla_spawn
UpperCamelCase__ :Tuple = '''
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
'''.split()
with patch.object(UpperCamelCase_ , '''argv''' , UpperCamelCase_ ):
xla_spawn.main() | 280 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = "gpt_neox"
def __init__( self, lowerCamelCase__=5_0432, lowerCamelCase__=6144, lowerCamelCase__=44, lowerCamelCase__=64, lowerCamelCase__=2_4576, lowerCamelCase__="gelu", lowerCamelCase__=0.25, lowerCamelCase__=1_0000, lowerCamelCase__=0.0, lowerCamelCase__=0.0, lowerCamelCase__=0.1, lowerCamelCase__=2048, lowerCamelCase__=0.02, lowerCamelCase__=1e-5, lowerCamelCase__=True, lowerCamelCase__=0, lowerCamelCase__=2, lowerCamelCase__=False, lowerCamelCase__=True, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__, **lowerCamelCase__ )
A : List[str] = vocab_size
A : Tuple = max_position_embeddings
A : Optional[Any] = hidden_size
A : Tuple = num_hidden_layers
A : Tuple = num_attention_heads
A : List[Any] = intermediate_size
A : int = hidden_act
A : Tuple = rotary_pct
A : int = rotary_emb_base
A : int = attention_dropout
A : List[str] = hidden_dropout
A : str = classifier_dropout
A : Optional[int] = initializer_range
A : List[Any] = layer_norm_eps
A : List[Any] = use_cache
A : Any = tie_word_embeddings
A : Tuple = use_parallel_residual
A : Union[str, Any] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def _lowerCAmelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling, lowerCamelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
A : Dict = self.rope_scaling.get("""type""", lowerCamelCase__ )
A : List[str] = self.rope_scaling.get("""factor""", lowerCamelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(lowerCamelCase__, lowerCamelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 662 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ ):
return f'''gaussian_noise_s={seed}_shape={"_".join([str(lowerCamelCase__ ) for s in shape] )}.npy'''
def _lowerCAmelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 4, 64, 64), lowerCamelCase__=False ):
A : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return image
def _lowerCAmelCase ( self, lowerCamelCase__=False, lowerCamelCase__="CompVis/stable-diffusion-v1-4" ):
A : str = jnp.bfloataa if fpaa else jnp.floataa
A : Union[str, Any] = """bf16""" if fpaa else None
A , A : str = FlaxUNetaDConditionModel.from_pretrained(
lowerCamelCase__, subfolder="""unet""", dtype=lowerCamelCase__, revision=lowerCamelCase__ )
return model, params
def _lowerCAmelCase ( self, lowerCamelCase__=0, lowerCamelCase__=(4, 77, 768), lowerCamelCase__=False ):
A : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
A : List[str] = jnp.array(load_hf_numpy(self.get_file_format(lowerCamelCase__, lowerCamelCase__ ) ), dtype=lowerCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : List[str] = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""", fpaa=lowerCamelCase__ )
A : str = self.get_latents(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : int = self.get_encoder_hidden_states(lowerCamelCase__, fpaa=lowerCamelCase__ )
A : Optional[Any] = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : Dict = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ):
A , A : Tuple = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""", fpaa=lowerCamelCase__ )
A : int = self.get_latents(lowerCamelCase__, shape=(4, 4, 96, 96), fpaa=lowerCamelCase__ )
A : Union[str, Any] = self.get_encoder_hidden_states(lowerCamelCase__, shape=(4, 77, 1024), fpaa=lowerCamelCase__ )
A : Dict = model.apply(
{"""params""": params}, lowerCamelCase__, jnp.array(lowerCamelCase__, dtype=jnp.intaa ), encoder_hidden_states=lowerCamelCase__, ).sample
assert sample.shape == latents.shape
A : Dict = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ), dtype=jnp.floataa )
A : List[Any] = jnp.array(lowerCamelCase__, dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCamelCase__, lowerCamelCase__, atol=1e-2 )
| 662 | 1 |
import numpy as np
def lowerCamelCase__ ( a : np.ndarray , a : np.ndarray , a : float = 1e-12 , a : int = 100 , ) -> tuple[float, np.ndarray]:
"""simple docstring"""
assert np.shape(a )[0] == np.shape(a )[1]
# Ensure proper dimensionality.
assert np.shape(a )[0] == np.shape(a )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(a ) == np.iscomplexobj(a )
a__ :List[str] = np.iscomplexobj(a )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(a , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
a__ :List[str] = False
a__ :str = 0
a__ :Dict = 0
a__ :Tuple = 1e12
while not convergence:
# Multiple matrix by the vector.
a__ :Tuple = np.dot(a , a )
# Normalize the resulting output vector.
a__ :List[Any] = w / np.linalg.norm(a )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
a__ :List[str] = vector.conj().T if is_complex else vector.T
a__ :Any = np.dot(a , np.dot(a , a ) )
# Check convergence.
a__ :Optional[Any] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
a__ :List[str] = True
a__ :Optional[int] = lambda_
if is_complex:
a__ :Union[str, Any] = np.real(lambda_ )
return lambda_, vector
def lowerCamelCase__ ( ) -> None:
"""simple docstring"""
a__ :List[str] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
a__ :Dict = np.array([41, 4, 20] )
a__ :Union[str, Any] = real_input_matrix.astype(np.complexaaa )
a__ :Optional[Any] = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
a__ :Union[str, Any] = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
a__ :Union[str, Any] = real_input_matrix
a__ :int = real_vector
elif problem_type == "complex":
a__ :Optional[Any] = complex_input_matrix
a__ :List[Any] = complex_vector
# Our implementation.
a__ :Any = power_iteration(a , a )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
a__ :int = np.linalg.eigh(a )
# Last eigenvalue is the maximum one.
a__ :int = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
a__ :Optional[Any] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(a ) - np.abs(a ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 719 |
import sys
from collections import defaultdict
class lowerCAmelCase_ :
def __init__( self : Optional[int] ) ->Any:
"""simple docstring"""
a__ :Optional[Any] = []
def _snake_case ( self : Optional[Any] , __A : List[Any] ) ->List[str]:
"""simple docstring"""
return self.node_position[vertex]
def _snake_case ( self : Optional[Any] , __A : str , __A : Any ) ->Dict:
"""simple docstring"""
a__ :Dict = pos
def _snake_case ( self : str , __A : Union[str, Any] , __A : Union[str, Any] , __A : Optional[Any] , __A : Optional[int] ) ->List[Any]:
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
a__ :str = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
a__ :Optional[int] = 2 * start + 1
else:
a__ :List[Any] = 2 * start + 2
if heap[smallest_child] < heap[start]:
a__ , a__ :Optional[Any] = heap[smallest_child], positions[smallest_child]
a__ , a__ :int = (
heap[start],
positions[start],
)
a__ , a__ :List[Any] = temp, tempa
a__ :Any = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __A )
self.top_to_bottom(__A , __A , __A , __A )
def _snake_case ( self : List[str] , __A : Any , __A : List[str] , __A : Any , __A : str ) ->Optional[Any]:
"""simple docstring"""
a__ :Optional[Any] = position[index]
while index != 0:
a__ :str = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
a__ :int = heap[parent]
a__ :Optional[Any] = position[parent]
self.set_position(position[parent] , __A )
else:
a__ :List[Any] = val
a__ :List[Any] = temp
self.set_position(__A , __A )
break
a__ :Union[str, Any] = parent
else:
a__ :int = val
a__ :Dict = temp
self.set_position(__A , 0 )
def _snake_case ( self : Tuple , __A : int , __A : int ) ->Union[str, Any]:
"""simple docstring"""
a__ :Tuple = len(__A ) // 2 - 1
for i in range(__A , -1 , -1 ):
self.top_to_bottom(__A , __A , len(__A ) , __A )
def _snake_case ( self : List[Any] , __A : List[Any] , __A : int ) ->Optional[Any]:
"""simple docstring"""
a__ :Any = positions[0]
a__ :str = sys.maxsize
self.top_to_bottom(__A , 0 , len(__A ) , __A )
return temp
def lowerCamelCase__ ( a : Any ) -> Union[str, Any]:
"""simple docstring"""
a__ :Tuple = Heap()
a__ :List[Any] = [0] * len(a )
a__ :str = [-1] * len(a ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
a__ :Any = [] # Heap of Distance of vertices from their neighboring vertex
a__ :int = []
for vertex in range(len(a ) ):
distance_tv.append(sys.maxsize )
positions.append(a )
heap.node_position.append(a )
a__ :Tuple = []
a__ :Any = 1
a__ :int = sys.maxsize
for neighbor, distance in adjacency_list[0]:
a__ :int = 0
a__ :List[str] = distance
heap.heapify(a , a )
for _ in range(1 , len(a ) ):
a__ :Dict = heap.delete_minimum(a , a )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
a__ :Optional[int] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(a )]
):
a__ :List[str] = distance
heap.bottom_to_top(
a , heap.get_position(a ) , a , a )
a__ :str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
snake_case__ = int(input('''Enter number of edges: ''').strip())
snake_case__ = defaultdict(list)
for _ in range(edges_number):
snake_case__ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 373 | 0 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( UpperCamelCase__ ):
lowerCamelCase__ = (UnCLIPScheduler,)
def __a ( self , **snake_case_ ) -> Tuple:
SCREAMING_SNAKE_CASE : str ={
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**snake_case_ )
return config
def __a ( self ) -> Tuple:
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __a ( self ) -> int:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=snake_case_ )
def __a ( self ) -> Union[str, Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case_ )
def __a ( self ) -> Union[str, Any]:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=snake_case_ )
def __a ( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=snake_case_ )
def __a ( self ) -> List[Any]:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=snake_case_ , prev_timestep=snake_case_ )
def __a ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : List[str] =self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict =self.get_scheduler_config(variance_type='''fixed_small_log''' )
SCREAMING_SNAKE_CASE : int =scheduler_class(**snake_case_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0E-1_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __a ( self ) -> int:
SCREAMING_SNAKE_CASE : Tuple =self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Dict =self.get_scheduler_config(variance_type='''learned_range''' )
SCREAMING_SNAKE_CASE : Optional[Any] =scheduler_class(**snake_case_ )
SCREAMING_SNAKE_CASE : Dict =0.5
assert scheduler._get_variance(1 , predicted_variance=snake_case_ ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=snake_case_ ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=snake_case_ ) - -0.001_0011 < 1E-5
def __a ( self ) -> int:
SCREAMING_SNAKE_CASE : str =self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Optional[int] =self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Optional[int] =scheduler_class(**snake_case_ )
SCREAMING_SNAKE_CASE : Dict =scheduler.timesteps
SCREAMING_SNAKE_CASE : Optional[Any] =self.dummy_model()
SCREAMING_SNAKE_CASE : str =self.dummy_sample_deter
SCREAMING_SNAKE_CASE : List[Any] =torch.manual_seed(0 )
for i, t in enumerate(snake_case_ ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : Dict =model(snake_case_ , snake_case_ )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : Optional[int] =scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] =pred_prev_sample
SCREAMING_SNAKE_CASE : List[str] =torch.sum(torch.abs(snake_case_ ) )
SCREAMING_SNAKE_CASE : Tuple =torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __a ( self ) -> Tuple:
SCREAMING_SNAKE_CASE : Dict =self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : Union[str, Any] =self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Union[str, Any] =scheduler_class(**snake_case_ )
scheduler.set_timesteps(25 )
SCREAMING_SNAKE_CASE : int =scheduler.timesteps
SCREAMING_SNAKE_CASE : List[str] =self.dummy_model()
SCREAMING_SNAKE_CASE : Dict =self.dummy_sample_deter
SCREAMING_SNAKE_CASE : Optional[Any] =torch.manual_seed(0 )
for i, t in enumerate(snake_case_ ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE : str =model(snake_case_ , snake_case_ )
if i + 1 == timesteps.shape[0]:
SCREAMING_SNAKE_CASE : Optional[Any] =None
else:
SCREAMING_SNAKE_CASE : List[str] =timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE : List[Any] =scheduler.step(
snake_case_ , snake_case_ , snake_case_ , prev_timestep=snake_case_ , generator=snake_case_ ).prev_sample
SCREAMING_SNAKE_CASE : Any =pred_prev_sample
SCREAMING_SNAKE_CASE : Union[str, Any] =torch.sum(torch.abs(snake_case_ ) )
SCREAMING_SNAKE_CASE : List[Any] =torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __a ( self ) -> str:
pass
def __a ( self ) -> Optional[Any]:
pass
| 258 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase__ = CTRLTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = False
def __a ( self ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE : Optional[int] =['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
SCREAMING_SNAKE_CASE : Dict =dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
SCREAMING_SNAKE_CASE : Dict =['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
SCREAMING_SNAKE_CASE : List[str] ={'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Any =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(snake_case_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(snake_case_ ) )
def __a ( self , **snake_case_ ) -> str:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def __a ( self , snake_case_ ) -> List[str]:
SCREAMING_SNAKE_CASE : str ='''adapt react readapt apt'''
SCREAMING_SNAKE_CASE : List[str] ='''adapt react readapt apt'''
return input_text, output_text
def __a ( self ) -> str:
SCREAMING_SNAKE_CASE : Optional[int] =CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE : Optional[int] ='''adapt react readapt apt'''
SCREAMING_SNAKE_CASE : Optional[int] ='''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
SCREAMING_SNAKE_CASE : Optional[int] =tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
SCREAMING_SNAKE_CASE : Any =tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : Tuple =[0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
| 258 | 1 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _UpperCAmelCase ( __A : Optional[int] ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _UpperCAmelCase ( ):
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
a_ : Optional[int] = [1, 2, 3]
with pytest.raises(__A ):
with parallel_backend('''unsupported backend''' ):
map_nested(__A , __A , num_proc=2 )
with pytest.raises(__A ):
with parallel_backend('''unsupported backend''' ):
map_nested(__A , __A , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def _UpperCAmelCase ( __A : Optional[Any] ):
a_ : List[Any] = [1, 2]
a_ : List[Any] = {'''a''': 1, '''b''': 2}
a_ : Union[str, Any] = {'''a''': [1, 2], '''b''': [3, 4]}
a_ : Any = {'''a''': {'''1''': 1}, '''b''': 2}
a_ : Union[str, Any] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
a_ : str = [2, 3]
a_ : Optional[int] = {'''a''': 2, '''b''': 3}
a_ : List[Any] = {'''a''': [2, 3], '''b''': [4, 5]}
a_ : List[str] = {'''a''': {'''1''': 2}, '''b''': 3}
a_ : Optional[int] = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
| 666 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
snake_case__ = LongformerTokenizer
snake_case__ = True
snake_case__ = LongformerTokenizerFast
snake_case__ = True
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a_ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
a_ : Optional[Any] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
a_ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
a_ : Any = {'''unk_token''': '''<unk>'''}
a_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def SCREAMING_SNAKE_CASE ( self : Any , **__SCREAMING_SNAKE_CASE : Any ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
a_ : Union[str, Any] = '''lower newer'''
a_ : List[Any] = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
a_ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a_ : List[str] = '''lower newer'''
a_ : str = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
a_ : Optional[int] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokens + [tokenizer.unk_token]
a_ : Any = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
a_ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
a_ : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
a_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : List[str] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
a_ : str = self.get_tokenizer()
a_ : int = '''Encode this sequence.'''
a_ : List[str] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
a_ : Dict = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
a_ : Optional[Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
a_ : List[Any] = '''Encode <mask> sequence'''
a_ : List[str] = '''Encode <mask>sequence'''
a_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = encoded.index(__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : Any = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
a_ : str = '''A, <mask> AllenNLP sentence.'''
a_ : List[Any] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
a_ : Dict = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
a_ : str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
a_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a_ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a_ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
a_ : Union[str, Any] = f'{text_of_1_token} {text_of_1_token}'
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Optional[int] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Tuple = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : Any = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Union[str, Any] = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : int = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
a_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
a_ : int = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
| 666 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ :Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ :List[Any] = {
"""microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
snake_case__ : List[Any] = 'cvt'
def __init__( self : List[Any] , A__ : Tuple=3 , A__ : Optional[Any]=[7, 3, 3] , A__ : List[str]=[4, 2, 2] , A__ : int=[2, 1, 1] , A__ : Optional[int]=[64, 192, 384] , A__ : List[str]=[1, 3, 6] , A__ : Optional[int]=[1, 2, 10] , A__ : List[Any]=[4.0, 4.0, 4.0] , A__ : Any=[0.0, 0.0, 0.0] , A__ : List[Any]=[0.0, 0.0, 0.0] , A__ : Union[str, Any]=[0.0, 0.0, 0.1] , A__ : Any=[True, True, True] , A__ : List[Any]=[False, False, True] , A__ : Dict=["dw_bn", "dw_bn", "dw_bn"] , A__ : str=[3, 3, 3] , A__ : Any=[1, 1, 1] , A__ : Dict=[2, 2, 2] , A__ : int=[1, 1, 1] , A__ : Dict=[1, 1, 1] , A__ : int=0.02 , A__ : str=1e-1_2 , **A__ : str , ):
"""simple docstring"""
super().__init__(**A__ )
__lowerCamelCase : List[Any] = num_channels
__lowerCamelCase : List[str] = patch_sizes
__lowerCamelCase : List[str] = patch_stride
__lowerCamelCase : Any = patch_padding
__lowerCamelCase : str = embed_dim
__lowerCamelCase : Optional[int] = num_heads
__lowerCamelCase : Any = depth
__lowerCamelCase : Dict = mlp_ratio
__lowerCamelCase : List[str] = attention_drop_rate
__lowerCamelCase : Dict = drop_rate
__lowerCamelCase : Optional[int] = drop_path_rate
__lowerCamelCase : List[Any] = qkv_bias
__lowerCamelCase : int = cls_token
__lowerCamelCase : List[str] = qkv_projection_method
__lowerCamelCase : int = kernel_qkv
__lowerCamelCase : str = padding_kv
__lowerCamelCase : str = stride_kv
__lowerCamelCase : Any = padding_q
__lowerCamelCase : str = stride_q
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : int = layer_norm_eps
| 150 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCAmelCase__ :str = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def __init__( self : Optional[int] , A__ : str , A__ : Optional[Any]=768 ):
"""simple docstring"""
super().__init__(A__ )
__lowerCamelCase : int = proj_size
__lowerCamelCase : List[Any] = CLIPVisionModel(A__ )
__lowerCamelCase : Tuple = PaintByExampleMapper(A__ )
__lowerCamelCase : Union[str, Any] = nn.LayerNorm(config.hidden_size )
__lowerCamelCase : Optional[int] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
__lowerCamelCase : Union[str, Any] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def a_ ( self : Optional[int] , A__ : Any , A__ : List[Any]=False ):
"""simple docstring"""
__lowerCamelCase : Optional[Any] = self.model(pixel_values=A__ )
__lowerCamelCase : int = clip_output.pooler_output
__lowerCamelCase : List[Any] = self.mapper(latent_states[:, None] )
__lowerCamelCase : Any = self.final_layer_norm(A__ )
__lowerCamelCase : Union[str, Any] = self.proj_out(A__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : List[str] , A__ : Any ):
"""simple docstring"""
super().__init__()
__lowerCamelCase : str = (config.num_hidden_layers + 1) // 5
__lowerCamelCase : Union[str, Any] = config.hidden_size
__lowerCamelCase : List[str] = 1
__lowerCamelCase : Optional[Any] = nn.ModuleList(
[
BasicTransformerBlock(A__ , A__ , A__ , activation_fn="""gelu""" , attention_bias=A__ )
for _ in range(A__ )
] )
def a_ ( self : Tuple , A__ : Any ):
"""simple docstring"""
for block in self.blocks:
__lowerCamelCase : Dict = block(A__ )
return hidden_states
| 150 | 1 |
"""simple docstring"""
def __lowerCamelCase ( lowerCAmelCase__ = 6008_5147_5143 ):
try:
A__ = int(lowerCAmelCase__ )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
A__ = 2
A__ = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
A__ = i
while n % i == 0:
A__ = n // i
i += 1
return int(lowerCAmelCase__ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 710 |
"""simple docstring"""
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = '''ybelkada/fonts'''
def __lowerCamelCase ( ):
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
f'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
'Pix2StructImageProcessor. Please upgrade torch.' )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
requires_backends(lowerCAmelCase__ ,['torch'] )
_check_torch_version()
A__ = image_tensor.unsqueeze(0 )
A__ = torch.nn.functional.unfold(lowerCAmelCase__ ,(patch_height, patch_width) ,stride=(patch_height, patch_width) )
A__ = patches.reshape(image_tensor.size(0 ) ,image_tensor.size(1 ) ,lowerCAmelCase__ ,lowerCAmelCase__ ,-1 )
A__ = patches.permute(0 ,4 ,2 ,3 ,1 ).reshape(
image_tensor.size(2 ) // patch_height ,image_tensor.size(3 ) // patch_width ,image_tensor.size(1 ) * patch_height * patch_width ,)
return patches.unsqueeze(0 )
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ = 36 ,lowerCAmelCase__ = "black" ,lowerCAmelCase__ = "white" ,lowerCAmelCase__ = 5 ,lowerCAmelCase__ = 5 ,lowerCAmelCase__ = 5 ,lowerCAmelCase__ = 5 ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,):
requires_backends(lowerCAmelCase__ ,'vision' )
# Add new lines so that each line is no more than 80 characters.
A__ = textwrap.TextWrapper(width=80 )
A__ = wrapper.wrap(text=lowerCAmelCase__ )
A__ = '\n'.join(lowerCAmelCase__ )
if font_bytes is not None and font_path is None:
A__ = io.BytesIO(lowerCAmelCase__ )
elif font_path is not None:
A__ = font_path
else:
A__ = hf_hub_download(lowerCAmelCase__ ,'Arial.TTF' )
A__ = ImageFont.truetype(lowerCAmelCase__ ,encoding='UTF-8' ,size=lowerCAmelCase__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
A__ = ImageDraw.Draw(Image.new('RGB' ,(1, 1) ,lowerCAmelCase__ ) )
A__ , A__ , A__ , A__ = temp_draw.textbbox((0, 0) ,lowerCAmelCase__ ,lowerCAmelCase__ )
# Create the actual image with a bit of padding around the text.
A__ = text_width + left_padding + right_padding
A__ = text_height + top_padding + bottom_padding
A__ = Image.new('RGB' ,(image_width, image_height) ,lowerCAmelCase__ )
A__ = ImageDraw.Draw(lowerCAmelCase__ )
draw.text(xy=(left_padding, top_padding) ,text=lowerCAmelCase__ ,fill=lowerCAmelCase__ ,font=lowerCAmelCase__ )
return image
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ):
requires_backends(lowerCAmelCase__ ,'vision' )
# Convert to PIL image if necessary
A__ = to_pil_image(lowerCAmelCase__ )
A__ = render_text(lowerCAmelCase__ ,**lowerCAmelCase__ )
A__ = max(header_image.width ,image.width )
A__ = int(image.height * (new_width / image.width) )
A__ = int(header_image.height * (new_width / header_image.width) )
A__ = Image.new('RGB' ,(new_width, new_height + new_header_height) ,'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) ,(0, 0) )
new_image.paste(image.resize((new_width, new_height) ) ,(0, new_header_height) )
# Convert back to the original framework if necessary
A__ = to_numpy_array(lowerCAmelCase__ )
if infer_channel_dimension_format(lowerCAmelCase__ ) == ChannelDimension.LAST:
A__ = to_channel_dimension_format(lowerCAmelCase__ ,ChannelDimension.LAST )
return new_image
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: Optional[int] = ["""flattened_patches"""]
def __init__( self , __a = True , __a = True , __a = None , __a = 2048 , __a = False , **__a , ):
"""simple docstring"""
super().__init__(**__a )
A__ = patch_size if patch_size is not None else {'height': 16, 'width': 16}
A__ = do_normalize
A__ = do_convert_rgb
A__ = max_patches
A__ = is_vqa
def _UpperCAmelCase ( self , __a , __a , __a , **__a ):
"""simple docstring"""
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
A__ = to_channel_dimension_format(__a , ChannelDimension.FIRST )
A__ = torch.from_numpy(__a )
A__ , A__ = patch_size['height'], patch_size['width']
A__ , A__ = get_image_size(__a )
# maximize scale s.t.
A__ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
A__ = max(min(math.floor(scale * image_height / patch_height ) , __a ) , 1 )
A__ = max(min(math.floor(scale * image_width / patch_width ) , __a ) , 1 )
A__ = max(num_feasible_rows * patch_height , 1 )
A__ = max(num_feasible_cols * patch_width , 1 )
A__ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=__a , antialias=__a , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
A__ = torch_extract_patches(__a , __a , __a )
A__ = patches.shape
A__ = patches_shape[1]
A__ = patches_shape[2]
A__ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
A__ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
A__ = torch.arange(__a ).reshape([rows, 1] ).repeat(1 , __a ).reshape([rows * columns, 1] )
A__ = torch.arange(__a ).reshape([1, columns] ).repeat(__a , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
A__ = row_ids.to(torch.floataa )
A__ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
A__ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
A__ = torch.nn.functional.pad(__a , [0, 0, 0, max_patches - (rows * columns)] ).float()
A__ = to_numpy_array(__a )
return result
def _UpperCAmelCase ( self , __a , __a = None , **__a ):
"""simple docstring"""
if image.dtype == np.uinta:
A__ = image.astype(np.floataa )
# take mean across the whole `image`
A__ = np.mean(__a )
A__ = np.std(__a )
A__ = max(__a , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(__a , mean=__a , std=__a , **__a )
def _UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ):
"""simple docstring"""
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A__ = patch_size if patch_size is not None else self.patch_size
A__ = max_patches if max_patches is not None else self.max_patches
A__ = self.is_vqa
if kwargs.get('data_format' , __a ) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ' )
A__ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A__ = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
A__ = [to_numpy_array(__a ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
A__ = kwargs.pop('font_bytes' , __a )
A__ = kwargs.pop('font_path' , __a )
if isinstance(__a , __a ):
A__ = [header_text] * len(__a )
A__ = [
render_header(__a , header_text[i] , font_bytes=__a , font_path=__a )
for i, image in enumerate(__a )
]
if do_normalize:
A__ = [self.normalize(image=__a ) for image in images]
# convert to torch tensor and permute
A__ = [
self.extract_flattened_patches(image=__a , max_patches=__a , patch_size=__a )
for image in images
]
# create attention mask in numpy
A__ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
A__ = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=__a )
return encoded_outputs
| 554 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 7 |
"""simple docstring"""
def A_ ( snake_case__ , snake_case__ = " " ) -> list:
_UpperCamelCase :List[str] = []
_UpperCamelCase :int = 0
for index, char in enumerate(snake_case__ ):
if char == separator:
split_words.append(string[last_index:index] )
_UpperCamelCase :Dict = index + 1
elif index + 1 == len(snake_case__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 355 | 0 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_token_type_ids
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = num_labels
lowerCAmelCase_ = num_choices
lowerCAmelCase_ = scope
def lowercase__ ( self):
return MPNetConfig.from_pretrained('''microsoft/mpnet-base''')
def lowercase__ ( self):
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
lowerCAmelCase_ = None
lowerCAmelCase_ = None
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowerCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
lowerCAmelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = MPNetModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCAmelCase_ = model(UpperCAmelCase_ , UpperCAmelCase_)
lowerCAmelCase_ = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = MPNetForQuestionAnswering(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCAmelCase_ = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = MPNetForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCAmelCase_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = self.num_choices
lowerCAmelCase_ = MPNetForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCAmelCase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowerCAmelCase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowerCAmelCase_ = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
lowerCAmelCase_ = self.num_labels
lowerCAmelCase_ = MPNetForTokenClassification(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
lowerCAmelCase_ = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowercase__ ( self):
lowerCAmelCase_ = self.prepare_config_and_inputs()
((lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_)) = config_and_inputs
lowerCAmelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
a :int = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
a :Any = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
a :int = False
a :List[str] = True
def lowercase__ ( self):
lowerCAmelCase_ = MPNetModelTester(self)
lowerCAmelCase_ = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37)
def lowercase__ ( self):
self.config_tester.run_common_tests()
def lowercase__ ( self):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*UpperCAmelCase_)
def lowercase__ ( self):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*UpperCAmelCase_)
def lowercase__ ( self):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*UpperCAmelCase_)
def lowercase__ ( self):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*UpperCAmelCase_)
def lowercase__ ( self):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*UpperCAmelCase_)
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self):
lowerCAmelCase_ = MPNetModel.from_pretrained('''microsoft/mpnet-base''')
lowerCAmelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
lowerCAmelCase_ = model(UpperCAmelCase_)[0]
lowerCAmelCase_ = torch.Size((1, 11, 768))
self.assertEqual(output.shape , UpperCAmelCase_)
lowerCAmelCase_ = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]])
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4))
| 711 |
from __future__ import annotations
from collections.abc import Iterator
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase):
lowerCAmelCase_ = value
lowerCAmelCase_ = None
lowerCAmelCase_ = None
class UpperCamelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase):
lowerCAmelCase_ = tree
def lowercase__ ( self , _UpperCAmelCase):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left) + self.depth_first_search(node.right)
)
def __iter__( self):
yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 413 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__a :Tuple = logging.get_logger(__name__)
__a :Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__a :Any = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
__a :Union[str, Any] = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = VOCAB_FILES_NAMES
_lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
_lowerCamelCase : Tuple = RobertaTokenizer
def __init__( self : int , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Optional[Any]="replace" , UpperCAmelCase : Optional[int]="<s>" , UpperCAmelCase : Tuple="</s>" , UpperCAmelCase : Tuple="</s>" , UpperCAmelCase : Optional[Any]="<s>" , UpperCAmelCase : Optional[int]="<unk>" , UpperCAmelCase : Any="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : Optional[int]=True , **UpperCAmelCase : Tuple , ):
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
A_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
A_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
A_ = add_prefix_space
A_ = pre_tok_class(**UpperCAmelCase )
A_ = add_prefix_space
A_ = "post_processor"
A_ = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
A_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ = tuple(state["sep"] )
if "cls" in state:
A_ = tuple(state["cls"] )
A_ = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
A_ = add_prefix_space
A_ = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
A_ = trim_offsets
A_ = True
if changes_to_apply:
A_ = getattr(UpperCAmelCase , state.pop("type" ) )
A_ = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def __A ( self : Tuple ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __A ( self : List[Any] , UpperCAmelCase : str ):
A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
A_ = value
def __A ( self : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : List[str] ):
A_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Tuple , *UpperCAmelCase : Dict , **UpperCAmelCase : str ):
A_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def __A ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def __A ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any]=None ):
A_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __A ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
A_ = [self.sep_token_id]
A_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 86 |
from typing import Any
import numpy as np
def _a ( a :np.ndarray ) -> bool:
return np.array_equal(a , matrix.conjugate().T )
def _a ( a :np.ndarray , a :np.ndarray ) -> Any:
a = v.conjugate().T
a = v_star.dot(a )
assert isinstance(a , np.ndarray )
return (v_star_dot.dot(a )) / (v_star.dot(a ))
def _a ( ) -> None:
a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
a = np.array([[1], [2], [3]] )
assert is_hermitian(a ), F"""{a} is not hermitian."""
print(rayleigh_quotient(a , a ) )
a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(a ), F"""{a} is not hermitian."""
assert rayleigh_quotient(a , a ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 117 | 0 |
"""simple docstring"""
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod() | 302 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowercase_ : int
lowercase_ : int
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : list[list[Edge]] = [[] for _ in range(snake_case_ )]
A_ : Optional[int] = size
def __getitem__( self , snake_case_ ):
"""simple docstring"""
return iter(self._graph[vertex] )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self._size
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
if weight not in (0, 1):
raise ValueError('Edge weight must be either 0 or 1.' )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError('Vertex indexes must be in [0; size).' )
self._graph[from_vertex].append(Edge(snake_case_ , snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[str] = deque([start_vertex] )
A_ : list[int | None] = [None] * self.size
A_ : Optional[Any] = 0
while queue:
A_ : Union[str, Any] = queue.popleft()
A_ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A_ : int = current_distance + edge.weight
A_ : Union[str, Any] = distances[edge.destination_vertex]
if (
isinstance(snake_case_ , snake_case_ )
and new_distance >= dest_vertex_distance
):
continue
A_ : Union[str, Any] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError('No path from start_vertex to finish_vertex.' )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod() | 302 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCAmelCase_ = logging.get_logger(__name__)
@add_end_docstrings(
_lowercase , R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class _lowerCAmelCase ( _lowercase ):
def __magic_name__( self , __UpperCAmelCase ):
if self.framework == "tf":
lowerCAmelCase__ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCAmelCase__ : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCAmelCase )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : Any = self.get_masked_index(__UpperCAmelCase )
lowerCAmelCase__ : List[str] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __magic_name__( self , __UpperCAmelCase ):
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__UpperCAmelCase )
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ):
if return_tensors is None:
lowerCAmelCase__ : Union[str, Any] = self.framework
lowerCAmelCase__ : str = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase )
self.ensure_exactly_one_mask_token(__UpperCAmelCase )
return model_inputs
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = self.model(**__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = model_inputs['''input_ids''']
return model_outputs
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=5 , __UpperCAmelCase=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase__ : Union[str, Any] = target_ids.shape[0]
lowerCAmelCase__ : str = model_outputs['''input_ids'''][0]
lowerCAmelCase__ : str = model_outputs['''logits''']
if self.framework == "tf":
lowerCAmelCase__ : Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase__ : Optional[Any] = outputs.numpy()
lowerCAmelCase__ : Tuple = outputs[0, masked_index, :]
lowerCAmelCase__ : Union[str, Any] = stable_softmax(__UpperCAmelCase , axis=-1 )
if target_ids is not None:
lowerCAmelCase__ : Any = tf.gather_nd(tf.squeeze(__UpperCAmelCase , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase__ : List[str] = tf.expand_dims(__UpperCAmelCase , 0 )
lowerCAmelCase__ : int = tf.math.top_k(__UpperCAmelCase , k=__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Dict = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase__ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCAmelCase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase__ : Dict = outputs[0, masked_index, :]
lowerCAmelCase__ : Any = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase__ : Optional[int] = probs[..., target_ids]
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = probs.topk(__UpperCAmelCase )
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Union[str, Any] = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase__ : Dict = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase__ : Optional[int] = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase__ : str = target_ids[p].tolist()
lowerCAmelCase__ : Any = p
# Filter padding out:
lowerCAmelCase__ : Optional[int] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase__ : Any = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ : List[str] = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(__UpperCAmelCase )
result.append(__UpperCAmelCase )
if single_mask:
return result[0]
return result
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=None ):
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ : List[Any] = [targets]
try:
lowerCAmelCase__ : Optional[int] = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase__ : Dict = {}
lowerCAmelCase__ : Any = []
for target in targets:
lowerCAmelCase__ : Union[str, Any] = vocab.get(__UpperCAmelCase , __UpperCAmelCase )
if id_ is None:
lowerCAmelCase__ : Optional[Any] = self.tokenizer(
__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase , max_length=1 , truncation=__UpperCAmelCase , )['''input_ids''']
if len(__UpperCAmelCase ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
lowerCAmelCase__ : Tuple = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
lowerCAmelCase__ : Any = list(set(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
lowerCAmelCase__ : str = np.array(__UpperCAmelCase )
return target_ids
def __magic_name__( self , __UpperCAmelCase=None , __UpperCAmelCase=None ):
lowerCAmelCase__ : List[Any] = {}
if targets is not None:
lowerCAmelCase__ : List[Any] = self.get_target_ids(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = target_ids
if top_k is not None:
lowerCAmelCase__ : Union[str, Any] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = super().__call__(__UpperCAmelCase , **__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
return outputs[0]
return outputs
| 678 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any:
lowerCAmelCase__ : Optional[Any] = 0
if start < end:
lowerCAmelCase__ : Union[str, Any] = randint(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[str] = a[end]
lowerCAmelCase__ : List[Any] = a[pivot]
lowerCAmelCase__ : str = temp
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = _in_place_partition(UpperCamelCase , UpperCamelCase , UpperCamelCase )
count += _in_place_quick_sort(UpperCamelCase , UpperCamelCase , p - 1 )
count += _in_place_quick_sort(UpperCamelCase , p + 1 , UpperCamelCase )
return count
def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : List[Any] = randint(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : str = a[end]
lowerCAmelCase__ : Optional[int] = a[pivot]
lowerCAmelCase__ : List[str] = temp
lowerCAmelCase__ : str = start - 1
for index in range(UpperCamelCase , UpperCamelCase ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ : List[str] = new_pivot_index + 1
lowerCAmelCase__ : int = a[new_pivot_index]
lowerCAmelCase__ : int = a[index]
lowerCAmelCase__ : Tuple = temp
lowerCAmelCase__ : Optional[Any] = a[new_pivot_index + 1]
lowerCAmelCase__ : List[str] = a[end]
lowerCAmelCase__ : Union[str, Any] = temp
return new_pivot_index + 1, count
lowerCAmelCase_ = TemporaryFile()
lowerCAmelCase_ = 1_00 # 1000 elements are to be sorted
lowerCAmelCase_ , lowerCAmelCase_ = 0, 1 # mean and standard deviation
lowerCAmelCase_ = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
lowerCAmelCase_ = np.load(outfile)
lowerCAmelCase_ = len(M) - 1
lowerCAmelCase_ = _in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 678 | 1 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 719 | '''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
_A : List[str] = logging.get_logger(__name__)
_A : List[Any] = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
_A : str = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
_A : Any = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : List[str] = """whisper"""
_SCREAMING_SNAKE_CASE : List[Any] = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE : Optional[int] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_18_65 , SCREAMING_SNAKE_CASE__ : List[str]=80 , SCREAMING_SNAKE_CASE__ : Tuple=6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[int]=6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=15_36 , SCREAMING_SNAKE_CASE__ : Optional[Any]=15_36 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=5_02_57 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=2_56 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : Dict=0.0 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=15_00 , SCREAMING_SNAKE_CASE__ : Optional[int]=4_48 , SCREAMING_SNAKE_CASE__ : Dict=5_02_56 , SCREAMING_SNAKE_CASE__ : List[str]=5_02_56 , SCREAMING_SNAKE_CASE__ : List[str]=5_02_56 , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : int=[2_20, 5_02_56] , SCREAMING_SNAKE_CASE__ : str=False , SCREAMING_SNAKE_CASE__ : Tuple=2_56 , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=0.0_5 , SCREAMING_SNAKE_CASE__ : Optional[int]=10 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : Any=10 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : Tuple=7 , **SCREAMING_SNAKE_CASE__ : int , ) -> Dict:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = num_mel_bins
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase = max_source_positions
__lowerCAmelCase = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase = classifier_proj_size
__lowerCAmelCase = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase = apply_spec_augment
__lowerCAmelCase = mask_time_prob
__lowerCAmelCase = mask_time_length
__lowerCAmelCase = mask_time_min_masks
__lowerCAmelCase = mask_feature_prob
__lowerCAmelCase = mask_feature_length
__lowerCAmelCase = mask_feature_min_masks
__lowerCAmelCase = median_filter_width
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , suppress_tokens=SCREAMING_SNAKE_CASE__ , begin_suppress_tokens=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def a ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
__lowerCAmelCase = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
__lowerCAmelCase = {0: """batch"""}
else:
__lowerCAmelCase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction="""inputs""" )
return common_inputs
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : int = -1 , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional["TensorType"] = None , SCREAMING_SNAKE_CASE__ : int = 2_20_50 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : int = 2_20 , ) -> Mapping[str, Any]:
__lowerCAmelCase = OrderedDict()
__lowerCAmelCase = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ , sampling_rate=SCREAMING_SNAKE_CASE__ , time_duration=SCREAMING_SNAKE_CASE__ , frequency=SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase = encoder_inputs["""input_features"""].shape[2]
__lowerCAmelCase = encoder_sequence_length // 2 if self.use_past else seq_length
__lowerCAmelCase = super().generate_dummy_inputs(
preprocessor.tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = encoder_inputs.pop("""input_features""" )
__lowerCAmelCase = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
__lowerCAmelCase = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def a ( self : Optional[int] ) -> float:
return 1e-3
| 330 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.